fd2ea0a79f
This effectively "flips the switch" by making the core networking and multiqueue-aware drivers use the new TX multiqueue structures.

Non-multiqueue drivers need no changes. The interfaces they use, such as netif_stop_queue(), degenerate into an operation on TX queue zero, so everything "just works" for them.

Code that really wants to do "X" to all TX queues now invokes a routine that does so, such as netif_tx_wake_all_queues(), netif_tx_stop_all_queues(), etc.

pktgen and netpoll required a little more surgery than the others. In particular the pktgen changes, while functional, could be largely improved: the initial check in pktgen_xmit() will sometimes check the wrong queue, which is mostly harmless. The thing to do is probably to invoke fill_packet() earlier.

The bulk of the netpoll changes make the code operate solely on the TX queue indicated by the SKB queue mapping.

Setting of the SKB queue mapping is entirely confined inside net/core/dev.c:dev_pick_tx(). If we end up needing any kind of special semantics (drops, for example), it will be implemented here.

Finally, there is now a "real_num_tx_queues" field in which the driver indicates how many TX queues are actually active.

With IGB changes from Jeff Kirsher.

Signed-off-by: David S. Miller <davem@davemloft.net>
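To make the driver-facing side of the conversion concrete, here is a minimal sketch: code that used to stop "the" queue now either targets queue zero implicitly (non-multiqueue drivers) or quiesces every active ring with the new all-queues helpers, and a multiqueue driver advertises its active ring count through real_num_tx_queues. The ixgbe_set_tso() function further down in this file is the in-tree example of the first part; the fragment below is illustrative only, and the my_set_tso()/my_setup_queues() names are hypothetical, not part of the patch.

```c
/* Illustrative sketch only: my_set_tso()/my_setup_queues() are hypothetical;
 * the helpers shown are the ones this commit converts drivers to
 * (see ixgbe_set_tso() below for the real in-tree use).
 */
static int my_set_tso(struct net_device *netdev, u32 data)
{
	if (data) {
		netdev->features |= NETIF_F_TSO;
	} else {
		/* Old single-queue style:
		 *	netif_stop_queue(netdev);
		 * now degenerates to an operation on TX queue zero.
		 * A multiqueue-aware driver quiesces every ring instead:
		 */
		netif_tx_stop_all_queues(netdev);
		netdev->features &= ~NETIF_F_TSO;
		netif_tx_start_all_queues(netdev);
	}
	return 0;
}

/* At setup time a multiqueue driver tells the stack how many of its
 * allocated TX queues are actually in use (assumed here to be a direct
 * assignment, as drivers of this era did):
 */
static void my_setup_queues(struct net_device *netdev, unsigned int active)
{
	netdev->real_num_tx_queues = active;
}
```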
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for ixgbe */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include "ixgbe.h"


#define IXGBE_ALL_RAR_ENTRIES 16

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
		      offsetof(struct ixgbe_adapter, m)
static struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
	{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
	{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
	{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
	{"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
	{"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
	{"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
	{"multicast", IXGBE_STAT(net_stats.multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_STAT(net_stats.collisions)},
	{"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
	{"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
	{"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
	{"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
	{"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
	{"rx_header_split", IXGBE_STAT(rx_hdr_split)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"lro_aggregated", IXGBE_STAT(lro_aggregated)},
	{"lro_flushed", IXGBE_STAT(lro_flushed)},
};

#define IXGBE_QUEUE_STATS_LEN \
	((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
	  ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
	 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)

static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = 0;
	bool link_up;

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_ENABLE;
	ecmd->transceiver = XCVR_EXTERNAL;
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		ecmd->supported |= (SUPPORTED_1000baseT_Full |
				    SUPPORTED_TP | SUPPORTED_Autoneg);

		ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ADVERTISED_10000baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;

		ecmd->port = PORT_TP;
	} else {
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising = (ADVERTISED_10000baseT_Full |
				     ADVERTISED_FIBRE);
		ecmd->port = PORT_FIBRE;
	}

	adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up);
	if (link_up) {
		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
			       SPEED_10000 : SPEED_1000;
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	return 0;
}

static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber:
		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
		    (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
		/* in this case we currently only support 10Gb/FULL */
		break;
	default:
		break;
	}

	return 0;
}

static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	pause->autoneg = (hw->fc.type == ixgbe_fc_full ? 1 : 0);

	if (hw->fc.type == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.type == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.type == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((pause->autoneg == AUTONEG_ENABLE) ||
	    (pause->rx_pause && pause->tx_pause))
		hw->fc.type = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		hw->fc.type = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		hw->fc.type = ixgbe_fc_tx_pause;
	else if (!pause->rx_pause && !pause->tx_pause)
		hw->fc.type = ixgbe_fc_none;
	else
		return -EINVAL;

	hw->fc.original_type = hw->fc.type;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);
	else
		ixgbe_reset(adapter);

	return 0;
}

static u32 ixgbe_get_rx_csum(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED);
}

static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	if (data)
		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);
	else
		ixgbe_reset(adapter);

	return 0;
}

static u32 ixgbe_get_tx_csum(struct net_device *netdev)
{
	return (netdev->features & NETIF_F_HW_CSUM) != 0;
}

static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
{
	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}

static int ixgbe_set_tso(struct net_device *netdev, u32 data)
{
	if (data) {
		netdev->features |= NETIF_F_TSO;
		netdev->features |= NETIF_F_TSO6;
	} else {
		netif_tx_stop_all_queues(netdev);
		netdev->features &= ~NETIF_F_TSO;
		netdev->features &= ~NETIF_F_TSO6;
		netif_tx_start_all_queues(netdev);
	}
	return 0;
}

static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 1128
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);

	/* Interrupt */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL);
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
	for (i = 0; i < 8; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
	for (i = 0; i < 8; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE);
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);

	/* DCE */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
	for (i = 0; i < 8; i++)
		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
	for (i = 0; i < 8; i++)
		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
	for (i = 0; i < 8; i++)
		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0);
	regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1);
	regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2);
	regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3);
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0);
	regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1);
	regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2);
	regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3);
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
	for (i = 0; i < 8; i++)
		regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
}

static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	for (i = 0; i < eeprom_len; i++) {
		if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
						 &eeprom_buff[i])))
			break;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strncpy(drvinfo->driver, ixgbe_driver_name, 32);
	strncpy(drvinfo->version, ixgbe_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
	drvinfo->n_stats = IXGBE_STATS_LEN;
	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring;
	struct ixgbe_ring *rx_ring = adapter->rx_ring;

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_tx_buffer *old_buf;
	struct ixgbe_rx_buffer *old_rx_buf;
	void *old_desc;
	int i, err;
	u32 new_rx_count, new_tx_count, old_size;
	dma_addr_t old_dma;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
	new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring->count) &&
	    (new_rx_count == adapter->rx_ring->count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev))
		ixgbe_down(adapter);

	/*
	 * We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the tx and rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring->count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* Save existing descriptor ring */
			old_buf = adapter->tx_ring[i].tx_buffer_info;
			old_desc = adapter->tx_ring[i].desc;
			old_size = adapter->tx_ring[i].size;
			old_dma = adapter->tx_ring[i].dma;
			/* Try to allocate a new one */
			adapter->tx_ring[i].tx_buffer_info = NULL;
			adapter->tx_ring[i].desc = NULL;
			adapter->tx_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(adapter,
						       &adapter->tx_ring[i]);
			if (err) {
				/* Restore the old one so at least
				   the adapter still works, even if
				   we failed the request */
				adapter->tx_ring[i].tx_buffer_info = old_buf;
				adapter->tx_ring[i].desc = old_desc;
				adapter->tx_ring[i].size = old_size;
				adapter->tx_ring[i].dma = old_dma;
				goto err_setup;
			}
			/* Free the old buffer manually */
			vfree(old_buf);
			pci_free_consistent(adapter->pdev, old_size,
					    old_desc, old_dma);
		}
	}

	if (new_rx_count != adapter->rx_ring->count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {

			old_rx_buf = adapter->rx_ring[i].rx_buffer_info;
			old_desc = adapter->rx_ring[i].desc;
			old_size = adapter->rx_ring[i].size;
			old_dma = adapter->rx_ring[i].dma;

			adapter->rx_ring[i].rx_buffer_info = NULL;
			adapter->rx_ring[i].desc = NULL;
			adapter->rx_ring[i].dma = 0;
			adapter->rx_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter,
						       &adapter->rx_ring[i]);
			if (err) {
				adapter->rx_ring[i].rx_buffer_info = old_rx_buf;
				adapter->rx_ring[i].desc = old_desc;
				adapter->rx_ring[i].size = old_size;
				adapter->rx_ring[i].dma = old_dma;
				goto err_setup;
			}

			vfree(old_rx_buf);
			pci_free_consistent(adapter->pdev, old_size, old_desc,
					    old_dma);
		}
	}

	err = 0;
err_setup:
	if (netif_running(adapter->netdev))
		ixgbe_up(adapter);

	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}

static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u64 *queue_stat;
	int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
	int j, k;
	int i;
	u64 aggregated = 0, flushed = 0, no_desc = 0;

	ixgbe_update_stats(adapter);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < adapter->num_tx_queues; j++) {
		queue_stat = (u64 *)&adapter->tx_ring[j].stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	for (j = 0; j < adapter->num_rx_queues; j++) {
		aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated;
		flushed += adapter->rx_ring[j].lro_mgr.stats.flushed;
		no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc;
		queue_stat = (u64 *)&adapter->rx_ring[j].stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	adapter->lro_aggregated = aggregated;
	adapter->lro_flushed = flushed;
	adapter->lro_no_desc = no_desc;
}

static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;

	return;
}

static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

static int ixgbe_phys_id(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL);
	u32 i;

	if (!data || data > 300)
		data = 300;

	for (i = 0; i < (data * 1000); i += 400) {
		ixgbe_led_on(&adapter->hw, IXGBE_LED_ON);
		msleep_interruptible(200);
		ixgbe_led_off(&adapter->hw, IXGBE_LED_ON);
		msleep_interruptible(200);
	}

	/* Restore LED settings */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg);

	return 0;
}

static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
		ec->rx_coalesce_usecs = adapter->rx_eitr;
	else
		ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;

	if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
		ec->tx_coalesce_usecs = adapter->tx_eitr;
	else
		ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;

	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
	return 0;
}

static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs != 0) &&
	     (ec->rx_coalesce_usecs != 1) &&
	     (ec->rx_coalesce_usecs != 3) &&
	     (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
		return -EINVAL;
	if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
	    ((ec->tx_coalesce_usecs != 0) &&
	     (ec->tx_coalesce_usecs != 1) &&
	     (ec->tx_coalesce_usecs != 3) &&
	     (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
		return -EINVAL;

	/* convert to rate of irq's per second */
	if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
		adapter->rx_eitr = ec->rx_coalesce_usecs;
	else
		adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);

	if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
		adapter->tx_eitr = ec->rx_coalesce_usecs;
	else
		adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);

	if (ec->tx_max_coalesced_frames_irq)
		adapter->tx_ring[0].work_limit =
			ec->tx_max_coalesced_frames_irq;

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_up(adapter);
	}

	return 0;
}

static struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings = ixgbe_get_settings,
	.set_settings = ixgbe_set_settings,
	.get_drvinfo = ixgbe_get_drvinfo,
	.get_regs_len = ixgbe_get_regs_len,
	.get_regs = ixgbe_get_regs,
	.get_wol = ixgbe_get_wol,
	.nway_reset = ixgbe_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = ixgbe_get_eeprom_len,
	.get_eeprom = ixgbe_get_eeprom,
	.get_ringparam = ixgbe_get_ringparam,
	.set_ringparam = ixgbe_set_ringparam,
	.get_pauseparam = ixgbe_get_pauseparam,
	.set_pauseparam = ixgbe_set_pauseparam,
	.get_rx_csum = ixgbe_get_rx_csum,
	.set_rx_csum = ixgbe_set_rx_csum,
	.get_tx_csum = ixgbe_get_tx_csum,
	.set_tx_csum = ixgbe_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_msglevel = ixgbe_get_msglevel,
	.set_msglevel = ixgbe_set_msglevel,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ixgbe_set_tso,
	.get_strings = ixgbe_get_strings,
	.phys_id = ixgbe_phys_id,
	.get_sset_count = ixgbe_get_sset_count,
	.get_ethtool_stats = ixgbe_get_ethtool_stats,
	.get_coalesce = ixgbe_get_coalesce,
	.set_coalesce = ixgbe_set_coalesce,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}