/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "LICENSE.GPL".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbe */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>

#include "ixgbe.h"

#ifndef ETH_GSTRING_LEN
#define ETH_GSTRING_LEN 32
#endif

#define IXGBE_ALL_RAR_ENTRIES 16

#ifdef ETHTOOL_OPS_COMPAT
#include "kcompat_ethtool.c"
#endif
#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
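/*
 * The stat tables below are walked generically: ixgbe_get_ethtool_stats()
 * adds stat_offset to the base structure pointer and copies the field out
 * as either a u64 or a u32 depending on sizeof_stat, so exporting a new
 * counter only requires a new table entry here.
 */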
static const struct ixgbe_stats ixgbe_gstrings_net_stats[] = {
	IXGBE_NETDEV_STAT(rx_packets),
	IXGBE_NETDEV_STAT(tx_packets),
	IXGBE_NETDEV_STAT(rx_bytes),
	IXGBE_NETDEV_STAT(tx_bytes),
	IXGBE_NETDEV_STAT(rx_errors),
	IXGBE_NETDEV_STAT(tx_errors),
	IXGBE_NETDEV_STAT(rx_dropped),
	IXGBE_NETDEV_STAT(tx_dropped),
	IXGBE_NETDEV_STAT(multicast),
	IXGBE_NETDEV_STAT(collisions),
	IXGBE_NETDEV_STAT(rx_over_errors),
	IXGBE_NETDEV_STAT(rx_crc_errors),
	IXGBE_NETDEV_STAT(rx_frame_errors),
	IXGBE_NETDEV_STAT(rx_fifo_errors),
	IXGBE_NETDEV_STAT(rx_missed_errors),
	IXGBE_NETDEV_STAT(tx_aborted_errors),
	IXGBE_NETDEV_STAT(tx_carrier_errors),
	IXGBE_NETDEV_STAT(tx_fifo_errors),
	IXGBE_NETDEV_STAT(tx_heartbeat_errors),
};
#define IXGBE_STAT(_name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(struct ixgbe_adapter, _stat), \
	.stat_offset = offsetof(struct ixgbe_adapter, _stat) \
}
static struct ixgbe_stats ixgbe_gstrings_stats[] = {
	IXGBE_STAT("rx_pkts_nic", stats.gprc),
	IXGBE_STAT("tx_pkts_nic", stats.gptc),
	IXGBE_STAT("rx_bytes_nic", stats.gorc),
	IXGBE_STAT("tx_bytes_nic", stats.gotc),
	IXGBE_STAT("lsc_int", lsc_int),
	IXGBE_STAT("tx_busy", tx_busy),
	IXGBE_STAT("non_eop_descs", non_eop_descs),
#ifndef CONFIG_IXGBE_NAPI
	IXGBE_STAT("rx_dropped_backlog", rx_dropped_backlog),
#endif
	IXGBE_STAT("broadcast", stats.bprc),
	IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]),
	IXGBE_STAT("tx_timeout_count", tx_timeout_count),
	IXGBE_STAT("tx_restart_queue", restart_queue),
	IXGBE_STAT("rx_long_length_errors", stats.roc),
	IXGBE_STAT("rx_short_length_errors", stats.ruc),
	IXGBE_STAT("tx_flow_control_xon", stats.lxontxc),
	IXGBE_STAT("rx_flow_control_xon", stats.lxonrxc),
	IXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc),
	IXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc),
	IXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	IXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
	IXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
#ifndef IXGBE_NO_LRO
	IXGBE_STAT("lro_aggregated", lro_stats.coal),
	IXGBE_STAT("lro_flushed", lro_stats.flushed),
#endif /* IXGBE_NO_LRO */
	IXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources),
	IXGBE_STAT("hw_rsc_aggregated", rsc_total_count),
	IXGBE_STAT("hw_rsc_flushed", rsc_total_flush),
#ifdef HAVE_TX_MQ
	IXGBE_STAT("fdir_match", stats.fdirmatch),
	IXGBE_STAT("fdir_miss", stats.fdirmiss),
	IXGBE_STAT("fdir_overflow", fdir_overflow),
#endif /* HAVE_TX_MQ */
#ifdef IXGBE_FCOE
	IXGBE_STAT("fcoe_bad_fccrc", stats.fccrc),
	IXGBE_STAT("fcoe_last_errors", stats.fclast),
	IXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc),
	IXGBE_STAT("rx_fcoe_packets", stats.fcoeprc),
	IXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc),
	IXGBE_STAT("fcoe_noddp", stats.fcoe_noddp),
	IXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff),
	IXGBE_STAT("tx_fcoe_packets", stats.fcoeptc),
	IXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc),
#endif /* IXGBE_FCOE */
	IXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	IXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	IXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc),
	IXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc),
};
#define IXGBE_QUEUE_STATS_LEN \
	((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
	  ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
	 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_NETDEV_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_net_stats)
#define IXGBE_PB_STATS_LEN ( \
	(((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
	 IXGBE_FLAG_DCB_ENABLED) ? \
	 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
	  sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
	  sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
	  sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
	 / sizeof(u64) : 0)
#define IXGBE_VF_STATS_LEN \
	((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \
	 (sizeof(struct vf_stats) / sizeof(u64)))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_NETDEV_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN + \
			 IXGBE_VF_STATS_LEN)
#endif /* ETHTOOL_GSTATS */
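/*
 * Note: the *_STATS_LEN macros above expand netdev_priv(netdev), so they
 * can only be used where a local net_device pointer named "netdev" is in
 * scope.  The resulting IXGBE_STATS_LEN is per-device state: it changes
 * with the configured queue counts, the number of VFs, and the DCB flag.
 */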
#ifdef ETHTOOL_TEST
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IXGBE_TEST_LEN	(sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */
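/*
 * The string order above must match the result slots filled in by
 * ixgbe_diag_test(): data[0] register, data[1] EEPROM, data[2] interrupt,
 * data[3] loopback, and data[4] link.
 */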
int ixgbe_get_settings(struct net_device *netdev,
		       struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = 0;
	bool link_up;

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_ENABLE;
	ecmd->transceiver = XCVR_EXTERNAL;
	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		ecmd->supported |= (SUPPORTED_1000baseT_Full |
				    SUPPORTED_Autoneg);
		switch (hw->mac.type) {
		case ixgbe_mac_X540:
			ecmd->supported |= SUPPORTED_100baseT_Full;
			break;
		default:
			break;
		}
		ecmd->advertising = ADVERTISED_Autoneg;
		if (hw->phy.autoneg_advertised) {
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_100_FULL)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_10GB_FULL)
				ecmd->advertising |= ADVERTISED_10000baseT_Full;
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_1GB_FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		} else {
			/*
			 * Default advertised modes in case
			 * phy.autoneg_advertised isn't set.
			 */
			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
					      ADVERTISED_1000baseT_Full);
			if (hw->mac.type == ixgbe_mac_X540)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
		}
		if (hw->phy.media_type == ixgbe_media_type_copper) {
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
		} else {
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
		}
	} else if (hw->phy.media_type == ixgbe_media_type_backplane) {
		/* Set as FIBRE until SERDES defined in kernel */
		if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
			ecmd->supported = (SUPPORTED_1000baseT_Full |
					   SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
			ecmd->autoneg = AUTONEG_DISABLE;
		} else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
			   || (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
			ecmd->supported |= (SUPPORTED_1000baseT_Full |
					    SUPPORTED_10000baseT_Full |
					    SUPPORTED_Autoneg);
			ecmd->advertising = (ADVERTISED_10000baseT_Full |
					     ADVERTISED_1000baseT_Full |
					     ADVERTISED_Autoneg |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
		} else {
			ecmd->supported |= (SUPPORTED_1000baseT_Full |
					    SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_10000baseT_Full |
					     ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
		}
	} else {
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising = (ADVERTISED_10000baseT_Full |
				     ADVERTISED_FIBRE);
		ecmd->port = PORT_FIBRE;
		ecmd->autoneg = AUTONEG_DISABLE;
	}
#ifdef HAVE_ETHTOOL_SFP_DISPLAY_PORT
	/* Get PHY type */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_cu_unknown:
		/* Copper 10G-BASET */
		ecmd->port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ecmd->port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		switch (adapter->hw.phy.sfp_type) {
		/* SFP+ devices, further checking needed */
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ecmd->port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ecmd->port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ecmd->port = PORT_TP;
			ecmd->supported = SUPPORTED_TP;
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_TP);
			break;
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
			ecmd->port = PORT_FIBRE;
			ecmd->supported = SUPPORTED_FIBRE;
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ecmd->port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ecmd->port = PORT_OTHER;
		break;
	}
#endif /* HAVE_ETHTOOL_SFP_DISPLAY_PORT */
	if (!in_interrupt()) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/*
		 * this case is a special workaround for RHEL5 bonding
		 * that calls this routine from interrupt context
		 */
		link_speed = adapter->link_speed;
		link_up = adapter->link_up;
	}

	if (link_up) {
		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ecmd->speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ecmd->speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ecmd->speed = SPEED_100;
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	return 0;
}
static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->autoneg == AUTONEG_DISABLE)
			return -EINVAL;

		if (ecmd->advertising & ~ecmd->supported)
			return -EINVAL;

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true, true);
		}
	}

	return err;
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->fc.disable_fc_autoneg)
		pause->autoneg = 0;
	else
		pause->autoneg = 1;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the settings changed, update the fc config and renegotiate */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 1129
	return IXGBE_REGS_LEN * sizeof(u32);
}
#define IXGBE_GET_STAT(_A_, _R_)	(_A_->stats._R_)
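/*
 * IXGBE_REGS_LEN must stay at least one larger than the highest regs_buff
 * index written in ixgbe_get_regs() (currently 1128).  IXGBE_GET_STAT
 * reads the driver's cached copy in adapter->stats rather than issuing a
 * fresh register read.
 */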
static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
			   void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = (u32 *)p;
	u8 i;

	printk(KERN_DEBUG "ixgbe_get_regs_1\n");
	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
	printk(KERN_DEBUG "ixgbe_get_regs_2 0x%p\n", hw->hw_addr);

	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	printk(KERN_DEBUG "ixgbe_get_regs_3\n");
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	printk(KERN_DEBUG "ixgbe_get_regs_4\n");

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			regs_buff[35 + i] = IXGBE_READ_REG(hw,
							   IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw,
							   IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
	for (i = 0; i < 8; i++)
		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
	for (i = 0; i < 8; i++)
		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
	for (i = 0; i < 8; i++)
		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
	/* 82599/X540-specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = ixgbe_read_eeprom_buffer(hw, first_word, eeprom_len,
					   eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
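/*
 * Worked example of the word math above: a request with offset = 3 and
 * len = 4 gives first_word = 1 and last_word = 3, so three 16-bit words
 * are read; the memcpy then skips the first byte (offset & 1) of the
 * buffer so exactly the 4 requested bytes are returned.
 */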
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = ixgbe_read_eeprom(hw, last_word,
					    &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = ixgbe_write_eeprom_buffer(hw, first_word,
					    last_word - first_word + 1,
					    eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		ixgbe_update_eeprom_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
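/*
 * Because the EEPROM is word-addressable, a write whose start or end is
 * byte-aligned rather than word-aligned must read-modify-write the edge
 * words (handled above) before the whole range is written back and the
 * checksum word is recomputed.
 */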
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));

	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_stats = IXGBE_STATS_LEN;
	drvinfo->testinfo_len = IXGBE_TEST_LEN;
	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}
	/* alloc updated Tx resources */
	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* alloc updated Rx resources */
	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
		if (!rx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];
			rx_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(&rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto err_setup;
			}
		}
	}
	/* bring interface down to prepare for update */
	ixgbe_down(adapter);

	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbe_up(adapter);

err_setup:
	/* free Tx resources if Rx error is encountered */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			ixgbe_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
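/*
 * Design note on ixgbe_set_ringparam(): replacement descriptor rings are
 * cloned and allocated *before* ixgbe_down() is called, so an allocation
 * failure leaves the interface running on its old rings; the old resources
 * are only freed once every replacement ring was set up successfully.
 */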
#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
static int ixgbe_get_stats_count(struct net_device *netdev)
{
	return IXGBE_STATS_LEN;
}

#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
#ifdef HAVE_NETDEV_STATS_IN_NETDEV
	struct net_device_stats *net_stats = &netdev->stats;
#else
	struct net_device_stats *net_stats = &adapter->net_stats;
#endif
	u64 *queue_stat;
	int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
	int i, j, k;
	char *p;

	printk(KERN_DEBUG "ixgbe_stats 0\n");
	ixgbe_update_stats(adapter);
	printk(KERN_DEBUG "ixgbe_stats 1\n");

	for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
		p = (char *)net_stats + ixgbe_gstrings_net_stats[i].stat_offset;
		data[i] = (ixgbe_gstrings_net_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < IXGBE_GLOBAL_STATS_LEN; j++, i++) {
		p = (char *)adapter + ixgbe_gstrings_stats[j].stat_offset;
		data[i] = (ixgbe_gstrings_stats[j].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	printk(KERN_DEBUG "ixgbe_stats 2\n");

	for (j = 0; j < adapter->num_tx_queues; j++) {
		queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	for (j = 0; j < adapter->num_rx_queues; j++) {
		queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	printk(KERN_DEBUG "ixgbe_stats 3\n");

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
			data[i++] = adapter->stats.pxontxc[j];
			data[i++] = adapter->stats.pxofftxc[j];
		}
		for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
			data[i++] = adapter->stats.pxonrxc[j];
			data[i++] = adapter->stats.pxoffrxc[j];
		}
	}
	printk(KERN_DEBUG "ixgbe_stats 4\n");

	stat_count = sizeof(struct vf_stats) / sizeof(u64);
	for (j = 0; j < adapter->num_vfs; j++) {
		queue_stat = (u64 *)&adapter->vfinfo[j].vfstats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats;
		for (k = 0; k < stat_count; k++)
			data[i + k] += queue_stat[k];
		i += k;
	}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_net_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
				sprintf(p, "tx_pb_%u_pxon", i);
				p += ETH_GSTRING_LEN;
				sprintf(p, "tx_pb_%u_pxoff", i);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
				sprintf(p, "rx_pb_%u_pxon", i);
				p += ETH_GSTRING_LEN;
				sprintf(p, "rx_pb_%u_pxoff", i);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < adapter->num_vfs; i++) {
			sprintf(p, "VF %d Rx Packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "VF %d Rx Bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "VF %d Tx Packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "VF %d Tx Bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "VF %d MC Packets", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;
	return *data;
}
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
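/*
 * Test-type semantics: PATTERN_TEST and SET_READ_TEST treat the register
 * as an array of array_len entries spaced 0x40 bytes apart; WRITE_NO_TEST
 * performs setup writes with no read-back check; the TABLE32/TABLE64
 * variants index contiguous tables in 4- or 8-byte steps, with
 * TABLE64_TEST_HI addressing the upper dword at reg + 4.
 */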
/* default 82599 register test */
static struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};
/* default 82598 register test */
static struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};
#define REG_PATTERN_TEST(R, M, W) \
{ \
	u32 pat, val, before; \
	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
		before = readl(adapter->hw.hw_addr + R); \
		writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
		val = readl(adapter->hw.hw_addr + R); \
		if (val != (_test[pat] & W & M)) { \
			e_err(drv, "pattern test reg %04X failed: got " \
			      "0x%08X expected 0x%08X\n", \
			      R, val, (_test[pat] & W & M)); \
			*data = R; \
			writel(before, adapter->hw.hw_addr + R); \
			return 1; \
		} \
		writel(before, adapter->hw.hw_addr + R); \
	} \
}
#define REG_SET_AND_CHECK(R, M, W) \
{ \
	u32 val, before; \
	before = readl(adapter->hw.hw_addr + R); \
	writel((W & M), (adapter->hw.hw_addr + R)); \
	val = readl(adapter->hw.hw_addr + R); \
	if ((W & M) != (val & M)) { \
		e_err(drv, "set/check reg %04X test failed: got 0x%08X " \
		      "expected 0x%08X\n", R, (val & M), (W & M)); \
		*data = R; \
		writel(before, (adapter->hw.hw_addr + R)); \
		return 1; \
	} \
	writel(before, (adapter->hw.hw_addr + R)); \
}
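/*
 * Both macros above expand inside ixgbe_reg_test(): on a mismatch they
 * record the failing register offset in *data, restore the register's
 * prior value, and return 1 from the *enclosing function*, which is why
 * they are written as statement blocks rather than helper functions.
 */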
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_reg_test *test;
	u32 value, status_before, status_after;
	u32 toggle;
	u32 i;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	status_before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
	status_after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != status_after) {
		e_err(drv, "failed STATUS register test got: "
		      "0x%08X expected: 0x%08X\n", status_after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, status_before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg + (i * 0x40),
						 test->mask,
						 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg + (i * 0x40),
						  test->mask,
						  test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
				       + (i * 0x40));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						 test->mask,
						 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}
static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}

static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n",
	       (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
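/*
 * The interrupt test above works purely in software: writing a bit to
 * EICS forces the corresponding interrupt cause, and the test handler
 * ORs EICR into adapter->test_icr.  Result codes: 3 = a masked interrupt
 * fired, 4 = the expected interrupt never posted, 5 = an unrelated
 * interrupt fired while only one cause was enabled.
 */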
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* X540 needs to set the MACC.FLU bit to force link up */
	if (adapter->hw.mac.type == ixgbe_mac_X540) {
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
	}

	/* right now we only support MAC loopback in the driver */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* Setup MAC loopback */
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}
static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	//*data = ixgbe_setup_desc_rings(adapter);

	*data = ixgbe_setup_loopback_test(adapter);

	//*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

	//ixgbe_free_desc_rings(adapter);
	return *data;
}
#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
static int ixgbe_diag_test_count(struct net_device *netdev)
{
	return IXGBE_TEST_LEN;
}

#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					e_warn(drv, "Please take active VFS "
					       "offline and restart the "
					       "adapter before running NIC "
					       "diagnostics\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		e_info(hw, "online testing starting\n");
		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 1;
	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;

	/* WOL not supported except for the following */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices support WOL */
		switch (hw->subsystem_device_id) {
		case IXGBE_SUBDEV_ID_82599_560FLR:
			/* only support first port */
			if (hw->bus.func != 0) {
				wol->supported = 0;
				break;
			}
		case IXGBE_SUBDEV_ID_82599_SFP:
			retval = 0;
			break;
		default:
			wol->supported = 0;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (hw->subsystem_device_id ==
		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
			wol->supported = 0;
			break;
		}
		retval = 0;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		retval = 0;
		break;
	case IXGBE_DEV_ID_X540T:
		/* check EEPROM to see if WOL is enabled */
		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
		     (hw->bus.func == 0))) {
			retval = 0;
			break;
		}

		/* All others not supported */
		wol->supported = 0;
		break;
	default:
		wol->supported = 0;
	}

	return retval;
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
#else
static int ixgbe_phys_id(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	u32 i;

	if (!data || data > 300)
		data = 300;

	for (i = 0; i < (data * 1000); i += 400) {
		ixgbe_led_on(hw, IXGBE_LED_ON);
		msleep_interruptible(200);
		ixgbe_led_off(hw, IXGBE_LED_ON);
		msleep_interruptible(200);
	}

	/* Restore LED settings */
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);

	return 0;
}

#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
#ifndef CONFIG_IXGBE_NAPI
	ec->rx_max_coalesced_frames_irq = adapter->rx_work_limit;
#endif /* CONFIG_IXGBE_NAPI */
	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
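
/*
 * Usage sketch: "ethtool -c eth0" (interface name assumed) reads these
 * values back.  The >> 2 undoes the << 2 encoding applied in
 * ixgbe_set_coalesce() below, so userspace always sees microseconds;
 * 0 and 1 pass through unscaled because they are special values
 * (throttling off and dynamic ITR) rather than microsecond counts.
 */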
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough "
				      "to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
#ifdef IXGBE_NO_LRO
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
#else
		e_info(probe, "rx-usecs set too low, "
			      "falling back to software LRO\n");
#endif /* IXGBE_NO_LRO */
		return true;
	}

	return false;
}
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	int num_vectors;
	u16 tx_itr_param, rx_itr_param;
	bool need_reset = false;

	/* don't accept tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
	    && ec->tx_coalesce_usecs)
		return -EINVAL;

	if (ec->tx_max_coalesced_frames_irq)
		adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;

#ifndef CONFIG_IXGBE_NAPI
	if (ec->rx_max_coalesced_frames_irq)
		adapter->rx_work_limit = ec->rx_max_coalesced_frames_irq;

#endif /* CONFIG_IXGBE_NAPI */
	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_10K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* check the old value and enable RSC if necessary */
	need_reset = ixgbe_update_rsc(adapter);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_vectors = 1;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		q_vector->tx.work_limit = adapter->tx_work_limit;
		q_vector->rx.work_limit = adapter->rx_work_limit;
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
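
/*
 * Usage sketch (interface name assumed):
 *
 *   # ethtool -C eth0 rx-usecs 30
 *
 * stores 30 << 2 = 120 in rx_itr_setting, keeping the value in EITR
 * register format rather than raw microseconds.  rx-usecs 1 selects
 * dynamic ITR (programmed at IXGBE_20K_ITR here) and rx-usecs 0 turns
 * interrupt throttling off.
 */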
#ifndef HAVE_NDO_SET_FEATURES
static u32 ixgbe_get_rx_csum(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *ring = adapter->rx_ring[0];

	return test_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
}

static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = adapter->rx_ring[i];

		if (data)
			set_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
		else
			clear_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
	}

	/* LRO and RSC both depend on RX checksum to function */
	if (!data && (netdev->features & NETIF_F_LRO)) {
		netdev->features &= ~NETIF_F_LRO;

		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
			ixgbe_do_reset(netdev);
		}
	}

	return 0;
}
static u32 ixgbe_get_tx_csum(struct net_device *netdev)
{
	return (netdev->features & NETIF_F_IP_CSUM) != 0;
}

static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 feature_list;

#ifdef NETIF_F_IPV6_CSUM
	feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
#else
	feature_list = NETIF_F_IP_CSUM;
#endif
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		feature_list |= NETIF_F_SCTP_CSUM;
		break;
	default:
		break;
	}
	if (data)
		netdev->features |= feature_list;
	else
		netdev->features &= ~feature_list;

	return 0;
}
#ifdef NETIF_F_TSO
static int ixgbe_set_tso(struct net_device *netdev, u32 data)
{
	if (data) {
		netdev->features |= NETIF_F_TSO;
#ifdef NETIF_F_TSO6
		netdev->features |= NETIF_F_TSO6;
#endif
	} else {
#ifndef HAVE_NETDEV_VLAN_FEATURES
#ifdef NETIF_F_HW_VLAN_TX
		struct ixgbe_adapter *adapter = netdev_priv(netdev);
		/* disable TSO on all VLANs if they're present */
		if (adapter->vlgrp) {
			int i;
			struct net_device *v_netdev;
			for (i = 0; i < VLAN_N_VID; i++) {
				v_netdev =
				       vlan_group_get_device(adapter->vlgrp, i);
				if (v_netdev) {
					v_netdev->features &= ~NETIF_F_TSO;
#ifdef NETIF_F_TSO6
					v_netdev->features &= ~NETIF_F_TSO6;
#endif
					vlan_group_set_device(adapter->vlgrp, i,
							      v_netdev);
				}
			}
		}
#endif /* NETIF_F_HW_VLAN_TX */
#endif /* HAVE_NETDEV_VLAN_FEATURES */
		netdev->features &= ~NETIF_F_TSO;
#ifdef NETIF_F_TSO6
		netdev->features &= ~NETIF_F_TSO6;
#endif
	}
	return 0;
}
#endif /* NETIF_F_TSO */
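
/*
 * Usage sketch: on kernels without ndo_set_features, "ethtool -K eth0
 * tso off" (interface name assumed) lands here with data == 0.  The VLAN
 * walk above keeps stacked VLAN devices consistent, since on those older
 * kernels they copied TSO from the real device's feature set.
 */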
#ifdef ETHTOOL_GFLAGS
static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
	u32 changed = netdev->features ^ data;
	bool need_reset = false;
	int rc;

#ifndef HAVE_VLAN_RX_REGISTER
	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	    !(data & ETH_FLAG_RXVLAN))
		return -EINVAL;

#endif
#ifdef NETIF_F_RXHASH
	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
		supported_flags |= ETH_FLAG_RXHASH;

#endif
	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		supported_flags |= ETH_FLAG_LRO;

#ifdef ETHTOOL_GRXRINGS
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		supported_flags |= ETH_FLAG_NTUPLE;
	default:
		break;
	}

#endif
	rc = ethtool_op_set_flags(netdev, data, supported_flags);
	if (rc)
		return rc;

#ifndef HAVE_VLAN_RX_REGISTER
	if (changed & ETH_FLAG_RXVLAN)
		ixgbe_vlan_mode(netdev, netdev->features);

#endif
	/* if state changes we need to update adapter->flags and reset */
	if (!(netdev->features & NETIF_F_LRO)) {
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			need_reset = true;
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
	} else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
		   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
		if (adapter->rx_itr_setting == 1 ||
		    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			need_reset = true;
		} else if (changed & ETH_FLAG_LRO) {
#ifdef IXGBE_NO_LRO
			e_info(probe, "rx-usecs set too low, "
			       "disabling RSC\n");
#else
			e_info(probe, "rx-usecs set too low, "
			       "falling back to software LRO\n");
#endif /* IXGBE_NO_LRO */
		}
	}

#ifdef ETHTOOL_GRXRINGS
	/*
	 * Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */
	if (!(netdev->features & NETIF_F_NTUPLE)) {
		if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
			/* turn off Flow Director, set ATR and reset */
			if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
			    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
				adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
			need_reset = true;
		}
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	} else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
		/* turn off ATR, enable perfect filters and reset */
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		need_reset = true;
	}

#endif /* ETHTOOL_GRXRINGS */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}

#endif /* ETHTOOL_GFLAGS */
#endif /* HAVE_NDO_SET_FEATURES */
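
/*
 * Usage sketch: on pre-ndo_set_features kernels the ETHTOOL_SFLAGS path,
 * e.g. "ethtool -K eth0 lro on" or "ethtool -K eth0 ntuple on" (interface
 * name assumed), is serviced by ixgbe_set_flags().  Toggling ntuple swaps
 * the 82599/X540 Flow Director between ATR mode and perfect filters,
 * which is why it sets need_reset.
 */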
#ifdef ETHTOOL_GRXRINGS
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
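
/*
 * Usage sketch: "ethtool -u eth0 rule 10" (interface name and rule index
 * assumed) issues ETHTOOL_GRXCLSRULE with fs.location == 10 and is
 * answered by ixgbe_get_ethtool_fdir_entry(); -EINVAL is returned if no
 * filter is stored at that index.
 */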
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* if RSS is disabled then report no hashing */
	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
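
/*
 * Usage sketch: "ethtool -n eth0 rx-flow-hash tcp4" (interface name
 * assumed) reports the fields selected above, i.e. IP source/destination
 * plus both pairs of L4 port bytes for TCP, and for UDP as well once the
 * corresponding UDP hashing flag has been enabled.
 */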
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
			   void *rule_locs)
#else
			   u32 *rule_locs)
#endif
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
						 (u32 *)rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					   struct ixgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node, *node2, *parent;
	struct ixgbe_fdir_filter *rule;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = node;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_after(parent, &input->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/*
	 * Don't allow programming if the action is a queue greater than
	 * the number of online Rx queues.
	 */
	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= adapter->num_rx_queues))
		return -EINVAL;

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes = fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx,
				(input->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[input->action]->reg_idx);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;

err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
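
/*
 * Usage sketch (interface name, addresses and indexes assumed): steer
 * port-80 TCP traffic to Rx queue 2 as a perfect filter at software
 * index 10:
 *
 *   # ethtool -N eth0 flow-type tcp4 dst-ip 192.168.10.1 dst-port 80 \
 *	action 2 loc 10
 *
 * "action -1" drops matching packets instead (RX_CLS_FLOW_DISC).  Note
 * the restriction enforced above: all rules on a port share one mask.
 */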
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL,
					      (u16)(fsp->location));
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
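
/*
 * Usage sketch: "ethtool -N eth0 delete 10" (interface name and index
 * assumed) issues ETHTOOL_SRXCLSRLDEL; passing a NULL input to
 * ixgbe_update_ethtool_fdir_entry() turns the update into a removal.
 */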
#ifdef ETHTOOL_SRXNTUPLE
/*
 * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid
 * a null pointer dereference as it was assumed if the NETIF_F_NTUPLE flag
 * was defined that this function was present.
 */
static int ixgbe_set_rx_ntuple(struct net_device *dev,
			       struct ethtool_rx_ntuple *cmd)
{
	return -EOPNOTSUPP;
}
#endif /* ETHTOOL_SRXNTUPLE */
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets"
			       " may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
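
/*
 * Usage sketch: enable 4-tuple RSS hashing for UDP over IPv4 (interface
 * name assumed), where s/d select the IP addresses and f/n the low and
 * high L4 port bytes:
 *
 *   # ethtool -N eth0 rx-flow-hash udp4 sdfn
 *
 * "... rx-flow-hash udp4 sd" reverts to IP-only hashing.  The warning
 * above applies because later fragments of a UDP datagram carry no port
 * numbers and so may hash to a different queue than the first fragment.
 */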
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

#endif /* ETHTOOL_GRXRINGS */
struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings = ixgbe_get_settings,
	.set_settings = ixgbe_set_settings,
	.get_drvinfo = ixgbe_get_drvinfo,
	.get_regs_len = ixgbe_get_regs_len,
	.get_regs = ixgbe_get_regs,
	.get_wol = ixgbe_get_wol,
	.set_wol = ixgbe_set_wol,
	.nway_reset = ixgbe_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = ixgbe_get_eeprom_len,
	.get_eeprom = ixgbe_get_eeprom,
	.set_eeprom = ixgbe_set_eeprom,
	.get_ringparam = ixgbe_get_ringparam,
	.set_ringparam = ixgbe_set_ringparam,
	.get_pauseparam = ixgbe_get_pauseparam,
	.set_pauseparam = ixgbe_set_pauseparam,
	.get_msglevel = ixgbe_get_msglevel,
	.set_msglevel = ixgbe_set_msglevel,
#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
	.self_test_count = ixgbe_diag_test_count,
#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
	.self_test = ixgbe_diag_test,
	.get_strings = ixgbe_get_strings,
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
	.set_phys_id = ixgbe_set_phys_id,
#else
	.phys_id = ixgbe_phys_id,
#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
	.get_stats_count = ixgbe_get_stats_count,
#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
	.get_sset_count = ixgbe_get_sset_count,
#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
	.get_ethtool_stats = ixgbe_get_ethtool_stats,
#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
	.get_perm_addr = ethtool_op_get_perm_addr,
#endif /* HAVE_ETHTOOL_GET_PERM_ADDR */
	.get_coalesce = ixgbe_get_coalesce,
	.set_coalesce = ixgbe_set_coalesce,
#ifndef HAVE_NDO_SET_FEATURES
	.get_rx_csum = ixgbe_get_rx_csum,
	.set_rx_csum = ixgbe_set_rx_csum,
	.get_tx_csum = ixgbe_get_tx_csum,
	.set_tx_csum = ixgbe_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = ixgbe_set_tso,
#endif /* NETIF_F_TSO */
#ifdef ETHTOOL_GFLAGS
	.get_flags = ethtool_op_get_flags,
	.set_flags = ixgbe_set_flags,
#endif /* ETHTOOL_GFLAGS */
#endif /* HAVE_NDO_SET_FEATURES */
#ifdef ETHTOOL_GRXRINGS
	.get_rxnfc = ixgbe_get_rxnfc,
	.set_rxnfc = ixgbe_set_rxnfc,
#ifdef ETHTOOL_SRXNTUPLE
	.set_rx_ntuple = ixgbe_set_rx_ntuple,
#endif /* ETHTOOL_SRXNTUPLE */
#endif /* ETHTOOL_GRXRINGS */
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}
#endif /* SIOCETHTOOL */