1 // SPDX-License-Identifier: GPL-2.0
2 /*******************************************************************************
4 Intel(R) Gigabit Ethernet Linux driver
5 Copyright(c) 2007-2013 Intel Corporation.
8 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
11 *******************************************************************************/
13 /* ethtool support for igb */
15 #include <linux/netdevice.h>
16 #include <linux/vmalloc.h>
19 #include <linux/ethtool.h>
20 #ifdef CONFIG_PM_RUNTIME
21 #include <linux/pm_runtime.h>
22 #endif /* CONFIG_PM_RUNTIME */
23 #include <linux/highmem.h>
26 #include "igb_regtest.h"
27 #include <linux/if_vlan.h>
29 #include <linux/mdio.h>
32 #ifdef ETHTOOL_OPS_COMPAT
33 #include "kcompat_ethtool.c"
37 char stat_string[ETH_GSTRING_LEN];
42 #define IGB_STAT(_name, _stat) { \
43 .stat_string = _name, \
44 .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
45 .stat_offset = offsetof(struct igb_adapter, _stat) \
47 static const struct igb_stats igb_gstrings_stats[] = {
48 IGB_STAT("rx_packets", stats.gprc),
49 IGB_STAT("tx_packets", stats.gptc),
50 IGB_STAT("rx_bytes", stats.gorc),
51 IGB_STAT("tx_bytes", stats.gotc),
52 IGB_STAT("rx_broadcast", stats.bprc),
53 IGB_STAT("tx_broadcast", stats.bptc),
54 IGB_STAT("rx_multicast", stats.mprc),
55 IGB_STAT("tx_multicast", stats.mptc),
56 IGB_STAT("multicast", stats.mprc),
57 IGB_STAT("collisions", stats.colc),
58 IGB_STAT("rx_crc_errors", stats.crcerrs),
59 IGB_STAT("rx_no_buffer_count", stats.rnbc),
60 IGB_STAT("rx_missed_errors", stats.mpc),
61 IGB_STAT("tx_aborted_errors", stats.ecol),
62 IGB_STAT("tx_carrier_errors", stats.tncrs),
63 IGB_STAT("tx_window_errors", stats.latecol),
64 IGB_STAT("tx_abort_late_coll", stats.latecol),
65 IGB_STAT("tx_deferred_ok", stats.dc),
66 IGB_STAT("tx_single_coll_ok", stats.scc),
67 IGB_STAT("tx_multi_coll_ok", stats.mcc),
68 IGB_STAT("tx_timeout_count", tx_timeout_count),
69 IGB_STAT("rx_long_length_errors", stats.roc),
70 IGB_STAT("rx_short_length_errors", stats.ruc),
71 IGB_STAT("rx_align_errors", stats.algnerrc),
72 IGB_STAT("tx_tcp_seg_good", stats.tsctc),
73 IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
74 IGB_STAT("rx_flow_control_xon", stats.xonrxc),
75 IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
76 IGB_STAT("tx_flow_control_xon", stats.xontxc),
77 IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
78 IGB_STAT("rx_long_byte_count", stats.gorc),
79 IGB_STAT("tx_dma_out_of_sync", stats.doosync),
81 IGB_STAT("lro_aggregated", lro_stats.coal),
82 IGB_STAT("lro_flushed", lro_stats.flushed),
84 IGB_STAT("tx_smbus", stats.mgptc),
85 IGB_STAT("rx_smbus", stats.mgprc),
86 IGB_STAT("dropped_smbus", stats.mgpdc),
87 IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
88 IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
89 IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
90 IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
91 #ifdef HAVE_PTP_1588_CLOCK
92 IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
93 IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
94 #endif /* HAVE_PTP_1588_CLOCK */
97 #define IGB_NETDEV_STAT(_net_stat) { \
98 .stat_string = #_net_stat, \
99 .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
100 .stat_offset = offsetof(struct net_device_stats, _net_stat) \
102 static const struct igb_stats igb_gstrings_net_stats[] = {
103 IGB_NETDEV_STAT(rx_errors),
104 IGB_NETDEV_STAT(tx_errors),
105 IGB_NETDEV_STAT(tx_dropped),
106 IGB_NETDEV_STAT(rx_length_errors),
107 IGB_NETDEV_STAT(rx_over_errors),
108 IGB_NETDEV_STAT(rx_frame_errors),
109 IGB_NETDEV_STAT(rx_fifo_errors),
110 IGB_NETDEV_STAT(tx_fifo_errors),
111 IGB_NETDEV_STAT(tx_heartbeat_errors)
114 #define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats)
115 #define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats)
116 #define IGB_RX_QUEUE_STATS_LEN \
117 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
118 #define IGB_TX_QUEUE_STATS_LEN \
119 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
120 #define IGB_QUEUE_STATS_LEN \
121 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
122 IGB_RX_QUEUE_STATS_LEN) + \
123 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
124 IGB_TX_QUEUE_STATS_LEN))
125 #define IGB_STATS_LEN \
126 (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
128 #endif /* ETHTOOL_GSTATS */
130 static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
131 "Register test (offline)", "Eeprom test (offline)",
132 "Interrupt test (offline)", "Loopback test (offline)",
133 "Link test (on/offline)"
135 #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
136 #endif /* ETHTOOL_TEST */
138 static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
140 struct igb_adapter *adapter = netdev_priv(netdev);
141 struct e1000_hw *hw = &adapter->hw;
144 if (hw->phy.media_type == e1000_media_type_copper) {
146 ecmd->supported = (SUPPORTED_10baseT_Half |
147 SUPPORTED_10baseT_Full |
148 SUPPORTED_100baseT_Half |
149 SUPPORTED_100baseT_Full |
150 SUPPORTED_1000baseT_Full|
154 ecmd->advertising = ADVERTISED_TP;
156 if (hw->mac.autoneg == 1) {
157 ecmd->advertising |= ADVERTISED_Autoneg;
158 /* the e1000 autoneg seems to match ethtool nicely */
159 ecmd->advertising |= hw->phy.autoneg_advertised;
162 ecmd->port = PORT_TP;
163 ecmd->phy_address = hw->phy.addr;
164 ecmd->transceiver = XCVR_INTERNAL;
167 ecmd->supported = (SUPPORTED_1000baseT_Full |
168 SUPPORTED_100baseT_Full |
172 if (hw->mac.type == e1000_i354)
173 ecmd->supported |= (SUPPORTED_2500baseX_Full);
175 ecmd->advertising = ADVERTISED_FIBRE;
177 switch (adapter->link_speed) {
179 ecmd->advertising = ADVERTISED_2500baseX_Full;
182 ecmd->advertising = ADVERTISED_1000baseT_Full;
185 ecmd->advertising = ADVERTISED_100baseT_Full;
191 if (hw->mac.autoneg == 1)
192 ecmd->advertising |= ADVERTISED_Autoneg;
194 ecmd->port = PORT_FIBRE;
195 ecmd->transceiver = XCVR_EXTERNAL;
198 if (hw->mac.autoneg != 1)
199 ecmd->advertising &= ~(ADVERTISED_Pause |
200 ADVERTISED_Asym_Pause);
202 if (hw->fc.requested_mode == e1000_fc_full)
203 ecmd->advertising |= ADVERTISED_Pause;
204 else if (hw->fc.requested_mode == e1000_fc_rx_pause)
205 ecmd->advertising |= (ADVERTISED_Pause |
206 ADVERTISED_Asym_Pause);
207 else if (hw->fc.requested_mode == e1000_fc_tx_pause)
208 ecmd->advertising |= ADVERTISED_Asym_Pause;
210 ecmd->advertising &= ~(ADVERTISED_Pause |
211 ADVERTISED_Asym_Pause);
213 status = E1000_READ_REG(hw, E1000_STATUS);
215 if (status & E1000_STATUS_LU) {
216 if ((hw->mac.type == e1000_i354) &&
217 (status & E1000_STATUS_2P5_SKU) &&
218 !(status & E1000_STATUS_2P5_SKU_OVER))
219 ecmd->speed = SPEED_2500;
220 else if (status & E1000_STATUS_SPEED_1000)
221 ecmd->speed = SPEED_1000;
222 else if (status & E1000_STATUS_SPEED_100)
223 ecmd->speed = SPEED_100;
225 ecmd->speed = SPEED_10;
227 if ((status & E1000_STATUS_FD) ||
228 hw->phy.media_type != e1000_media_type_copper)
229 ecmd->duplex = DUPLEX_FULL;
231 ecmd->duplex = DUPLEX_HALF;
238 if ((hw->phy.media_type == e1000_media_type_fiber) ||
240 ecmd->autoneg = AUTONEG_ENABLE;
242 ecmd->autoneg = AUTONEG_DISABLE;
245 /* MDI-X => 2; MDI =>1; Invalid =>0 */
246 if (hw->phy.media_type == e1000_media_type_copper)
247 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
250 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
252 #ifdef ETH_TP_MDI_AUTO
253 if (hw->phy.mdix == AUTO_ALL_MODES)
254 ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
256 ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
259 #endif /* ETH_TP_MDI_X */
263 static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
265 struct igb_adapter *adapter = netdev_priv(netdev);
266 struct e1000_hw *hw = &adapter->hw;
268 if (ecmd->duplex == DUPLEX_HALF) {
269 if (!hw->dev_spec._82575.eee_disable)
270 dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n");
271 hw->dev_spec._82575.eee_disable = true;
273 if (hw->dev_spec._82575.eee_disable)
274 dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n");
275 hw->dev_spec._82575.eee_disable = false;
278 /* When SoL/IDER sessions are active, autoneg/speed/duplex
279 * cannot be changed */
280 if (e1000_check_reset_block(hw)) {
281 dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link "
282 "characteristics when SoL/IDER is active.\n");
286 #ifdef ETH_TP_MDI_AUTO
288 * MDI setting is only allowed when autoneg enabled because
289 * some hardware doesn't allow MDI setting when speed or
292 if (ecmd->eth_tp_mdix_ctrl) {
293 if (hw->phy.media_type != e1000_media_type_copper)
296 if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
297 (ecmd->autoneg != AUTONEG_ENABLE)) {
298 dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
303 #endif /* ETH_TP_MDI_AUTO */
304 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
305 usleep_range(1000, 2000);
307 if (ecmd->autoneg == AUTONEG_ENABLE) {
309 if (hw->phy.media_type == e1000_media_type_fiber) {
310 hw->phy.autoneg_advertised = ecmd->advertising |
313 switch (adapter->link_speed) {
315 hw->phy.autoneg_advertised =
316 ADVERTISED_2500baseX_Full;
319 hw->phy.autoneg_advertised =
320 ADVERTISED_1000baseT_Full;
323 hw->phy.autoneg_advertised =
324 ADVERTISED_100baseT_Full;
330 hw->phy.autoneg_advertised = ecmd->advertising |
334 ecmd->advertising = hw->phy.autoneg_advertised;
335 if (adapter->fc_autoneg)
336 hw->fc.requested_mode = e1000_fc_default;
338 if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
339 clear_bit(__IGB_RESETTING, &adapter->state);
344 #ifdef ETH_TP_MDI_AUTO
345 /* MDI-X => 2; MDI => 1; Auto => 3 */
346 if (ecmd->eth_tp_mdix_ctrl) {
347 /* fix up the value for auto (3 => 0) as zero is mapped
350 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
351 hw->phy.mdix = AUTO_ALL_MODES;
353 hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
356 #endif /* ETH_TP_MDI_AUTO */
358 if (netif_running(adapter->netdev)) {
364 clear_bit(__IGB_RESETTING, &adapter->state);
368 static u32 igb_get_link(struct net_device *netdev)
370 struct igb_adapter *adapter = netdev_priv(netdev);
371 struct e1000_mac_info *mac = &adapter->hw.mac;
374 * If the link is not reported up to netdev, interrupts are disabled,
375 * and so the physical link state may have changed since we last
376 * looked. Set get_link_status to make sure that the true link
377 * state is interrogated, rather than pulling a cached and possibly
378 * stale link state from the driver.
380 if (!netif_carrier_ok(netdev))
381 mac->get_link_status = 1;
383 return igb_has_link(adapter);
386 static void igb_get_pauseparam(struct net_device *netdev,
387 struct ethtool_pauseparam *pause)
389 struct igb_adapter *adapter = netdev_priv(netdev);
390 struct e1000_hw *hw = &adapter->hw;
393 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
395 if (hw->fc.current_mode == e1000_fc_rx_pause)
397 else if (hw->fc.current_mode == e1000_fc_tx_pause)
399 else if (hw->fc.current_mode == e1000_fc_full) {
405 static int igb_set_pauseparam(struct net_device *netdev,
406 struct ethtool_pauseparam *pause)
408 struct igb_adapter *adapter = netdev_priv(netdev);
409 struct e1000_hw *hw = &adapter->hw;
412 adapter->fc_autoneg = pause->autoneg;
414 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
415 usleep_range(1000, 2000);
417 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
418 hw->fc.requested_mode = e1000_fc_default;
419 if (netif_running(adapter->netdev)) {
426 if (pause->rx_pause && pause->tx_pause)
427 hw->fc.requested_mode = e1000_fc_full;
428 else if (pause->rx_pause && !pause->tx_pause)
429 hw->fc.requested_mode = e1000_fc_rx_pause;
430 else if (!pause->rx_pause && pause->tx_pause)
431 hw->fc.requested_mode = e1000_fc_tx_pause;
432 else if (!pause->rx_pause && !pause->tx_pause)
433 hw->fc.requested_mode = e1000_fc_none;
435 hw->fc.current_mode = hw->fc.requested_mode;
437 if (hw->phy.media_type == e1000_media_type_fiber) {
438 retval = hw->mac.ops.setup_link(hw);
439 /* implicit goto out */
441 retval = e1000_force_mac_fc(hw);
444 e1000_set_fc_watermarks_generic(hw);
449 clear_bit(__IGB_RESETTING, &adapter->state);
453 static u32 igb_get_msglevel(struct net_device *netdev)
455 struct igb_adapter *adapter = netdev_priv(netdev);
456 return adapter->msg_enable;
459 static void igb_set_msglevel(struct net_device *netdev, u32 data)
461 struct igb_adapter *adapter = netdev_priv(netdev);
462 adapter->msg_enable = data;
465 static int igb_get_regs_len(struct net_device *netdev)
467 #define IGB_REGS_LEN 555
468 return IGB_REGS_LEN * sizeof(u32);
471 static void igb_get_regs(struct net_device *netdev,
472 struct ethtool_regs *regs, void *p)
474 struct igb_adapter *adapter = netdev_priv(netdev);
475 struct e1000_hw *hw = &adapter->hw;
479 memset(p, 0, IGB_REGS_LEN * sizeof(u32));
481 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
483 /* General Registers */
484 regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
485 regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
486 regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
487 regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
488 regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
489 regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
490 regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
491 regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
492 regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
493 regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
494 regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
495 regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
498 regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
501 /* Reading EICS for EICR because they read the
502 * same but EICS does not clear on read */
503 regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
504 regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
505 regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
506 regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
507 regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
508 regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
509 /* Reading ICS for ICR because they read the
510 * same but ICS does not clear on read */
511 regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
512 regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
513 regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
514 regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
515 regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
516 regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
517 regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
520 regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
521 regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
522 regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
523 regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
524 regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
525 regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
528 regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
529 regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
530 regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
531 regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
532 regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
533 regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);
536 regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
537 regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
538 regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
539 regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
542 regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
543 regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
544 regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
545 regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
546 regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
549 regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
550 regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
551 regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
552 regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
553 regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
554 regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
555 regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
558 regs_buff[54] = adapter->stats.crcerrs;
559 regs_buff[55] = adapter->stats.algnerrc;
560 regs_buff[56] = adapter->stats.symerrs;
561 regs_buff[57] = adapter->stats.rxerrc;
562 regs_buff[58] = adapter->stats.mpc;
563 regs_buff[59] = adapter->stats.scc;
564 regs_buff[60] = adapter->stats.ecol;
565 regs_buff[61] = adapter->stats.mcc;
566 regs_buff[62] = adapter->stats.latecol;
567 regs_buff[63] = adapter->stats.colc;
568 regs_buff[64] = adapter->stats.dc;
569 regs_buff[65] = adapter->stats.tncrs;
570 regs_buff[66] = adapter->stats.sec;
571 regs_buff[67] = adapter->stats.htdpmc;
572 regs_buff[68] = adapter->stats.rlec;
573 regs_buff[69] = adapter->stats.xonrxc;
574 regs_buff[70] = adapter->stats.xontxc;
575 regs_buff[71] = adapter->stats.xoffrxc;
576 regs_buff[72] = adapter->stats.xofftxc;
577 regs_buff[73] = adapter->stats.fcruc;
578 regs_buff[74] = adapter->stats.prc64;
579 regs_buff[75] = adapter->stats.prc127;
580 regs_buff[76] = adapter->stats.prc255;
581 regs_buff[77] = adapter->stats.prc511;
582 regs_buff[78] = adapter->stats.prc1023;
583 regs_buff[79] = adapter->stats.prc1522;
584 regs_buff[80] = adapter->stats.gprc;
585 regs_buff[81] = adapter->stats.bprc;
586 regs_buff[82] = adapter->stats.mprc;
587 regs_buff[83] = adapter->stats.gptc;
588 regs_buff[84] = adapter->stats.gorc;
589 regs_buff[86] = adapter->stats.gotc;
590 regs_buff[88] = adapter->stats.rnbc;
591 regs_buff[89] = adapter->stats.ruc;
592 regs_buff[90] = adapter->stats.rfc;
593 regs_buff[91] = adapter->stats.roc;
594 regs_buff[92] = adapter->stats.rjc;
595 regs_buff[93] = adapter->stats.mgprc;
596 regs_buff[94] = adapter->stats.mgpdc;
597 regs_buff[95] = adapter->stats.mgptc;
598 regs_buff[96] = adapter->stats.tor;
599 regs_buff[98] = adapter->stats.tot;
600 regs_buff[100] = adapter->stats.tpr;
601 regs_buff[101] = adapter->stats.tpt;
602 regs_buff[102] = adapter->stats.ptc64;
603 regs_buff[103] = adapter->stats.ptc127;
604 regs_buff[104] = adapter->stats.ptc255;
605 regs_buff[105] = adapter->stats.ptc511;
606 regs_buff[106] = adapter->stats.ptc1023;
607 regs_buff[107] = adapter->stats.ptc1522;
608 regs_buff[108] = adapter->stats.mptc;
609 regs_buff[109] = adapter->stats.bptc;
610 regs_buff[110] = adapter->stats.tsctc;
611 regs_buff[111] = adapter->stats.iac;
612 regs_buff[112] = adapter->stats.rpthc;
613 regs_buff[113] = adapter->stats.hgptc;
614 regs_buff[114] = adapter->stats.hgorc;
615 regs_buff[116] = adapter->stats.hgotc;
616 regs_buff[118] = adapter->stats.lenerrs;
617 regs_buff[119] = adapter->stats.scvpc;
618 regs_buff[120] = adapter->stats.hrmpc;
620 for (i = 0; i < 4; i++)
621 regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
622 for (i = 0; i < 4; i++)
623 regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
624 for (i = 0; i < 4; i++)
625 regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
626 for (i = 0; i < 4; i++)
627 regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
628 for (i = 0; i < 4; i++)
629 regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
630 for (i = 0; i < 4; i++)
631 regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
632 for (i = 0; i < 4; i++)
633 regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
634 for (i = 0; i < 4; i++)
635 regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
637 for (i = 0; i < 10; i++)
638 regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
639 for (i = 0; i < 8; i++)
640 regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
641 for (i = 0; i < 8; i++)
642 regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
643 for (i = 0; i < 16; i++)
644 regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
645 for (i = 0; i < 16; i++)
646 regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
648 for (i = 0; i < 4; i++)
649 regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
650 for (i = 0; i < 4; i++)
651 regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
652 for (i = 0; i < 4; i++)
653 regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
654 for (i = 0; i < 4; i++)
655 regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
656 for (i = 0; i < 4; i++)
657 regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
658 for (i = 0; i < 4; i++)
659 regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
660 for (i = 0; i < 4; i++)
661 regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
662 for (i = 0; i < 4; i++)
663 regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
664 for (i = 0; i < 4; i++)
665 regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
667 for (i = 0; i < 4; i++)
668 regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
669 for (i = 0; i < 4; i++)
670 regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
671 for (i = 0; i < 32; i++)
672 regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
673 for (i = 0; i < 128; i++)
674 regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
675 for (i = 0; i < 128; i++)
676 regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
677 for (i = 0; i < 4; i++)
678 regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
680 regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
681 regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
682 regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
683 regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
684 if (hw->mac.type > e1000_82580) {
685 regs_buff[551] = adapter->stats.o2bgptc;
686 regs_buff[552] = adapter->stats.b2ospc;
687 regs_buff[553] = adapter->stats.o2bspc;
688 regs_buff[554] = adapter->stats.b2ogprc;
692 static int igb_get_eeprom_len(struct net_device *netdev)
694 struct igb_adapter *adapter = netdev_priv(netdev);
695 return adapter->hw.nvm.word_size * 2;
698 static int igb_get_eeprom(struct net_device *netdev,
699 struct ethtool_eeprom *eeprom, u8 *bytes)
701 struct igb_adapter *adapter = netdev_priv(netdev);
702 struct e1000_hw *hw = &adapter->hw;
704 int first_word, last_word;
708 if (eeprom->len == 0)
711 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
713 first_word = eeprom->offset >> 1;
714 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
716 eeprom_buff = kmalloc(sizeof(u16) *
717 (last_word - first_word + 1), GFP_KERNEL);
721 if (hw->nvm.type == e1000_nvm_eeprom_spi)
722 ret_val = e1000_read_nvm(hw, first_word,
723 last_word - first_word + 1,
726 for (i = 0; i < last_word - first_word + 1; i++) {
727 ret_val = e1000_read_nvm(hw, first_word + i, 1,
734 /* Device's eeprom is always little-endian, word addressable */
735 for (i = 0; i < last_word - first_word + 1; i++)
736 eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]);
738 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
745 static int igb_set_eeprom(struct net_device *netdev,
746 struct ethtool_eeprom *eeprom, u8 *bytes)
748 struct igb_adapter *adapter = netdev_priv(netdev);
749 struct e1000_hw *hw = &adapter->hw;
752 int max_len, first_word, last_word, ret_val = 0;
755 if (eeprom->len == 0)
758 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
761 max_len = hw->nvm.word_size * 2;
763 first_word = eeprom->offset >> 1;
764 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
765 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
769 ptr = (void *)eeprom_buff;
771 if (eeprom->offset & 1) {
772 /* need read/modify/write of first changed EEPROM word */
773 /* only the second byte of the word is being modified */
774 ret_val = e1000_read_nvm(hw, first_word, 1,
778 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
779 /* need read/modify/write of last changed EEPROM word */
780 /* only the first byte of the word is being modified */
781 ret_val = e1000_read_nvm(hw, last_word, 1,
782 &eeprom_buff[last_word - first_word]);
785 /* Device's eeprom is always little-endian, word addressable */
786 for (i = 0; i < last_word - first_word + 1; i++)
787 le16_to_cpus(&eeprom_buff[i]);
789 memcpy(ptr, bytes, eeprom->len);
791 for (i = 0; i < last_word - first_word + 1; i++)
792 cpu_to_le16s(&eeprom_buff[i]);
794 ret_val = e1000_write_nvm(hw, first_word,
795 last_word - first_word + 1, eeprom_buff);
797 /* Update the checksum if write succeeded.
798 * and flush shadow RAM for 82573 controllers */
800 e1000_update_nvm_checksum(hw);
806 static void igb_get_drvinfo(struct net_device *netdev,
807 struct ethtool_drvinfo *drvinfo)
809 struct igb_adapter *adapter = netdev_priv(netdev);
811 strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
812 strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1);
814 strncpy(drvinfo->fw_version, adapter->fw_version,
815 sizeof(drvinfo->fw_version) - 1);
816 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) -1);
817 drvinfo->n_stats = IGB_STATS_LEN;
818 drvinfo->testinfo_len = IGB_TEST_LEN;
819 drvinfo->regdump_len = igb_get_regs_len(netdev);
820 drvinfo->eedump_len = igb_get_eeprom_len(netdev);
823 static void igb_get_ringparam(struct net_device *netdev,
824 struct ethtool_ringparam *ring)
826 struct igb_adapter *adapter = netdev_priv(netdev);
828 ring->rx_max_pending = IGB_MAX_RXD;
829 ring->tx_max_pending = IGB_MAX_TXD;
830 ring->rx_mini_max_pending = 0;
831 ring->rx_jumbo_max_pending = 0;
832 ring->rx_pending = adapter->rx_ring_count;
833 ring->tx_pending = adapter->tx_ring_count;
834 ring->rx_mini_pending = 0;
835 ring->rx_jumbo_pending = 0;
838 static int igb_set_ringparam(struct net_device *netdev,
839 struct ethtool_ringparam *ring)
841 struct igb_adapter *adapter = netdev_priv(netdev);
842 struct igb_ring *temp_ring;
844 u16 new_rx_count, new_tx_count;
846 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
849 new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
850 new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD);
851 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
853 new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
854 new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD);
855 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
857 if ((new_tx_count == adapter->tx_ring_count) &&
858 (new_rx_count == adapter->rx_ring_count)) {
863 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
864 usleep_range(1000, 2000);
866 if (!netif_running(adapter->netdev)) {
867 for (i = 0; i < adapter->num_tx_queues; i++)
868 adapter->tx_ring[i]->count = new_tx_count;
869 for (i = 0; i < adapter->num_rx_queues; i++)
870 adapter->rx_ring[i]->count = new_rx_count;
871 adapter->tx_ring_count = new_tx_count;
872 adapter->rx_ring_count = new_rx_count;
876 if (adapter->num_tx_queues > adapter->num_rx_queues)
877 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
879 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
889 * We can't just free everything and then setup again,
890 * because the ISRs in MSI-X mode get passed pointers
891 * to the tx and rx ring structs.
893 if (new_tx_count != adapter->tx_ring_count) {
894 for (i = 0; i < adapter->num_tx_queues; i++) {
895 memcpy(&temp_ring[i], adapter->tx_ring[i],
896 sizeof(struct igb_ring));
898 temp_ring[i].count = new_tx_count;
899 err = igb_setup_tx_resources(&temp_ring[i]);
903 igb_free_tx_resources(&temp_ring[i]);
909 for (i = 0; i < adapter->num_tx_queues; i++) {
910 igb_free_tx_resources(adapter->tx_ring[i]);
912 memcpy(adapter->tx_ring[i], &temp_ring[i],
913 sizeof(struct igb_ring));
916 adapter->tx_ring_count = new_tx_count;
919 if (new_rx_count != adapter->rx_ring_count) {
920 for (i = 0; i < adapter->num_rx_queues; i++) {
921 memcpy(&temp_ring[i], adapter->rx_ring[i],
922 sizeof(struct igb_ring));
924 temp_ring[i].count = new_rx_count;
925 err = igb_setup_rx_resources(&temp_ring[i]);
929 igb_free_rx_resources(&temp_ring[i]);
936 for (i = 0; i < adapter->num_rx_queues; i++) {
937 igb_free_rx_resources(adapter->rx_ring[i]);
939 memcpy(adapter->rx_ring[i], &temp_ring[i],
940 sizeof(struct igb_ring));
943 adapter->rx_ring_count = new_rx_count;
949 clear_bit(__IGB_RESETTING, &adapter->state);
952 static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
953 int reg, u32 mask, u32 write)
955 struct e1000_hw *hw = &adapter->hw;
957 static const u32 _test[] =
958 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
959 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
960 E1000_WRITE_REG(hw, reg, (_test[pat] & write));
961 val = E1000_READ_REG(hw, reg) & mask;
962 if (val != (_test[pat] & write & mask)) {
963 dev_err(pci_dev_to_dev(adapter->pdev), "pattern test reg %04X "
964 "failed: got 0x%08X expected 0x%08X\n",
965 E1000_REGISTER(hw, reg), val, (_test[pat] & write & mask));
966 *data = E1000_REGISTER(hw, reg);
974 static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
975 int reg, u32 mask, u32 write)
977 struct e1000_hw *hw = &adapter->hw;
979 E1000_WRITE_REG(hw, reg, write & mask);
980 val = E1000_READ_REG(hw, reg);
981 if ((write & mask) != (val & mask)) {
982 dev_err(pci_dev_to_dev(adapter->pdev), "set/check reg %04X test failed:"
983 " got 0x%08X expected 0x%08X\n", reg,
984 (val & mask), (write & mask));
985 *data = E1000_REGISTER(hw, reg);
992 #define REG_PATTERN_TEST(reg, mask, write) \
994 if (reg_pattern_test(adapter, data, reg, mask, write)) \
998 #define REG_SET_AND_CHECK(reg, mask, write) \
1000 if (reg_set_and_check(adapter, data, reg, mask, write)) \
1004 static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1006 struct e1000_hw *hw = &adapter->hw;
1007 struct igb_reg_test *test;
1008 u32 value, before, after;
1011 switch (adapter->hw.mac.type) {
1014 test = reg_test_i350;
1015 toggle = 0x7FEFF3FF;
1019 test = reg_test_i210;
1020 toggle = 0x7FEFF3FF;
1023 test = reg_test_82580;
1024 toggle = 0x7FEFF3FF;
1027 test = reg_test_82576;
1028 toggle = 0x7FFFF3FF;
1031 test = reg_test_82575;
1032 toggle = 0x7FFFF3FF;
1036 /* Because the status register is such a special case,
1037 * we handle it separately from the rest of the register
1038 * tests. Some bits are read-only, some toggle, and some
1039 * are writable on newer MACs.
1041 before = E1000_READ_REG(hw, E1000_STATUS);
1042 value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
1043 E1000_WRITE_REG(hw, E1000_STATUS, toggle);
1044 after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
1045 if (value != after) {
1046 dev_err(pci_dev_to_dev(adapter->pdev), "failed STATUS register test "
1047 "got: 0x%08X expected: 0x%08X\n", after, value);
1051 /* restore previous status */
1052 E1000_WRITE_REG(hw, E1000_STATUS, before);
1054 /* Perform the remainder of the register test, looping through
1055 * the test table until we either fail or reach the null entry.
1058 for (i = 0; i < test->array_len; i++) {
1059 switch (test->test_type) {
1061 REG_PATTERN_TEST(test->reg +
1062 (i * test->reg_offset),
1067 REG_SET_AND_CHECK(test->reg +
1068 (i * test->reg_offset),
1074 (adapter->hw.hw_addr + test->reg)
1075 + (i * test->reg_offset));
1078 REG_PATTERN_TEST(test->reg + (i * 4),
1082 case TABLE64_TEST_LO:
1083 REG_PATTERN_TEST(test->reg + (i * 8),
1087 case TABLE64_TEST_HI:
1088 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1101 static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1105 /* Validate NVM checksum */
1106 if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
1112 static irqreturn_t igb_test_intr(int irq, void *data)
1114 struct igb_adapter *adapter = data;
1115 struct e1000_hw *hw = &adapter->hw;
1117 adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
1122 static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1124 struct e1000_hw *hw = &adapter->hw;
1125 struct net_device *netdev = adapter->netdev;
1126 u32 mask, ics_mask, i = 0, shared_int = TRUE;
1127 u32 irq = adapter->pdev->irq;
1131 /* Hook up test interrupt handler just for this test */
1132 if (adapter->msix_entries) {
1133 if (request_irq(adapter->msix_entries[0].vector,
1134 &igb_test_intr, 0, netdev->name, adapter)) {
1138 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
1140 if (request_irq(irq,
1141 igb_test_intr, 0, netdev->name, adapter)) {
1145 } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
1146 netdev->name, adapter)) {
1148 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
1149 netdev->name, adapter)) {
1153 dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n",
1154 (shared_int ? "shared" : "unshared"));
1156 /* Disable all the interrupts */
1157 E1000_WRITE_REG(hw, E1000_IMC, ~0);
1158 E1000_WRITE_FLUSH(hw);
1159 usleep_range(10000, 20000);
1161 /* Define all writable bits for ICS */
1162 switch (hw->mac.type) {
1164 ics_mask = 0x37F47EDD;
1167 ics_mask = 0x77D4FBFD;
1170 ics_mask = 0x77DCFED5;
1174 ics_mask = 0x77DCFED5;
1178 ics_mask = 0x774CFED5;
1181 ics_mask = 0x7FFFFFFF;
1185 /* Test each interrupt */
1186 for (; i < 31; i++) {
1187 /* Interrupt to test */
1190 if (!(mask & ics_mask))
1194 /* Disable the interrupt to be reported in
1195 * the cause register and then force the same
1196 * interrupt and see if one gets posted. If
1197 * an interrupt was posted to the bus, the
1200 adapter->test_icr = 0;
1202 /* Flush any pending interrupts */
1203 E1000_WRITE_REG(hw, E1000_ICR, ~0);
1205 E1000_WRITE_REG(hw, E1000_IMC, mask);
1206 E1000_WRITE_REG(hw, E1000_ICS, mask);
1207 E1000_WRITE_FLUSH(hw);
1208 usleep_range(10000, 20000);
1210 if (adapter->test_icr & mask) {
1216 /* Enable the interrupt to be reported in
1217 * the cause register and then force the same
1218 * interrupt and see if one gets posted. If
1219 * an interrupt was not posted to the bus, the
1222 adapter->test_icr = 0;
1224 /* Flush any pending interrupts */
1225 E1000_WRITE_REG(hw, E1000_ICR, ~0);
1227 E1000_WRITE_REG(hw, E1000_IMS, mask);
1228 E1000_WRITE_REG(hw, E1000_ICS, mask);
1229 E1000_WRITE_FLUSH(hw);
1230 usleep_range(10000, 20000);
1232 if (!(adapter->test_icr & mask)) {
1238 /* Disable the other interrupts to be reported in
1239 * the cause register and then force the other
1240 * interrupts and see if any get posted. If
1241 * an interrupt was posted to the bus, the
1244 adapter->test_icr = 0;
1246 /* Flush any pending interrupts */
1247 E1000_WRITE_REG(hw, E1000_ICR, ~0);
1249 E1000_WRITE_REG(hw, E1000_IMC, ~mask);
1250 E1000_WRITE_REG(hw, E1000_ICS, ~mask);
1251 E1000_WRITE_FLUSH(hw);
1252 usleep_range(10000, 20000);
1254 if (adapter->test_icr & mask) {
1261 /* Disable all the interrupts */
1262 E1000_WRITE_REG(hw, E1000_IMC, ~0);
1263 E1000_WRITE_FLUSH(hw);
1264 usleep_range(10000, 20000);
1266 /* Unhook test interrupt handler */
1267 if (adapter->msix_entries)
1268 free_irq(adapter->msix_entries[0].vector, adapter);
1270 free_irq(irq, adapter);
1275 static void igb_free_desc_rings(struct igb_adapter *adapter)
1277 igb_free_tx_resources(&adapter->test_tx_ring);
1278 igb_free_rx_resources(&adapter->test_rx_ring);
1281 static int igb_setup_desc_rings(struct igb_adapter *adapter)
1283 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1284 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1285 struct e1000_hw *hw = &adapter->hw;
1288 /* Setup Tx descriptor ring and Tx buffers */
1289 tx_ring->count = IGB_DEFAULT_TXD;
1290 tx_ring->dev = pci_dev_to_dev(adapter->pdev);
1291 tx_ring->netdev = adapter->netdev;
1292 tx_ring->reg_idx = adapter->vfs_allocated_count;
1294 if (igb_setup_tx_resources(tx_ring)) {
1299 igb_setup_tctl(adapter);
1300 igb_configure_tx_ring(adapter, tx_ring);
1302 /* Setup Rx descriptor ring and Rx buffers */
1303 rx_ring->count = IGB_DEFAULT_RXD;
1304 rx_ring->dev = pci_dev_to_dev(adapter->pdev);
1305 rx_ring->netdev = adapter->netdev;
1306 #ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
1307 rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
1309 rx_ring->reg_idx = adapter->vfs_allocated_count;
1311 if (igb_setup_rx_resources(rx_ring)) {
1316 /* set the default queue to queue 0 of PF */
1317 E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3);
1319 /* enable receive ring */
1320 igb_setup_rctl(adapter);
1321 igb_configure_rx_ring(adapter, rx_ring);
1323 igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
1328 igb_free_desc_rings(adapter);
1332 static void igb_phy_disable_receiver(struct igb_adapter *adapter)
1334 struct e1000_hw *hw = &adapter->hw;
1336 /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1337 e1000_write_phy_reg(hw, 29, 0x001F);
1338 e1000_write_phy_reg(hw, 30, 0x8FFC);
1339 e1000_write_phy_reg(hw, 29, 0x001A);
1340 e1000_write_phy_reg(hw, 30, 0x8FF0);
1343 static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1345 struct e1000_hw *hw = &adapter->hw;
1348 hw->mac.autoneg = FALSE;
1350 if (hw->phy.type == e1000_phy_m88) {
1351 if (hw->phy.id != I210_I_PHY_ID) {
1352 /* Auto-MDI/MDIX Off */
1353 e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1354 /* reset to update Auto-MDI/MDIX */
1355 e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
1357 e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
1359 /* force 1000, set loopback */
1360 e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
1361 e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1364 /* enable MII loopback */
1365 if (hw->phy.type == e1000_phy_82580)
1366 e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
1369 /* force 1000, set loopback */
1370 e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1372 /* Now set up the MAC to the same speed/duplex as the PHY. */
1373 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1374 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1375 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1376 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1377 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1378 E1000_CTRL_FD | /* Force Duplex to FULL */
1379 E1000_CTRL_SLU); /* Set link up enable bit */
1381 if (hw->phy.type == e1000_phy_m88)
1382 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1384 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1386 /* Disable the receiver on the PHY so when a cable is plugged in, the
1387 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1389 if (hw->phy.type == e1000_phy_m88)
1390 igb_phy_disable_receiver(adapter);
1396 static int igb_set_phy_loopback(struct igb_adapter *adapter)
1398 return igb_integrated_phy_loopback(adapter);
1401 static int igb_setup_loopback_test(struct igb_adapter *adapter)
1403 struct e1000_hw *hw = &adapter->hw;
1406 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1408 /* use CTRL_EXT to identify link type as SGMII can appear as copper */
1409 if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
1410 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
1411 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
1412 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
1413 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
1415 /* Enable DH89xxCC MPHY for near end loopback */
1416 reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
1417 reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
1418 E1000_MPHY_PCS_CLK_REG_OFFSET;
1419 E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
1421 reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
1422 reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
1423 E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
1426 reg = E1000_READ_REG(hw, E1000_RCTL);
1427 reg |= E1000_RCTL_LBM_TCVR;
1428 E1000_WRITE_REG(hw, E1000_RCTL, reg);
1430 E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
1432 reg = E1000_READ_REG(hw, E1000_CTRL);
1433 reg &= ~(E1000_CTRL_RFCE |
1436 reg |= E1000_CTRL_SLU |
1438 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1440 /* Unset switch control to serdes energy detect */
1441 reg = E1000_READ_REG(hw, E1000_CONNSW);
1442 reg &= ~E1000_CONNSW_ENRGSRC;
1443 E1000_WRITE_REG(hw, E1000_CONNSW, reg);
1445 /* Unset sigdetect for SERDES loopback on
1446 * 82580 and newer devices
1448 if (hw->mac.type >= e1000_82580) {
1449 reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1450 reg |= E1000_PCS_CFG_IGN_SD;
1451 E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1454 /* Set PCS register for forced speed */
1455 reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1456 reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
1457 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
1458 E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
1459 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
1460 E1000_PCS_LCTL_FSD | /* Force Speed */
1461 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
1462 E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
1467 return igb_set_phy_loopback(adapter);
1470 static void igb_loopback_cleanup(struct igb_adapter *adapter)
1472 struct e1000_hw *hw = &adapter->hw;
1476 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
1477 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
1478 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
1479 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
1482 /* Disable near end loopback on DH89xxCC */
1483 reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
1484 reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK ) |
1485 E1000_MPHY_PCS_CLK_REG_OFFSET;
1486 E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
1488 reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
1489 reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
1490 E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
1493 rctl = E1000_READ_REG(hw, E1000_RCTL);
1494 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1495 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1497 hw->mac.autoneg = TRUE;
1498 e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
1499 if (phy_reg & MII_CR_LOOPBACK) {
1500 phy_reg &= ~MII_CR_LOOPBACK;
1501 if (hw->phy.type == I210_I_PHY_ID)
1502 e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
1503 e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
1504 e1000_phy_commit(hw);
1507 static void igb_create_lbtest_frame(struct sk_buff *skb,
1508 unsigned int frame_size)
1510 memset(skb->data, 0xFF, frame_size);
1512 memset(&skb->data[frame_size], 0xAA, frame_size - 1);
1513 memset(&skb->data[frame_size + 10], 0xBE, 1);
1514 memset(&skb->data[frame_size + 12], 0xAF, 1);
1517 static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
1518 unsigned int frame_size)
1520 unsigned char *data;
1525 #ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
1526 data = rx_buffer->skb->data;
1528 data = kmap(rx_buffer->page);
1531 if (data[3] != 0xFF ||
1532 data[frame_size + 10] != 0xBE ||
1533 data[frame_size + 12] != 0xAF)
1536 #ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
1537 kunmap(rx_buffer->page);
1543 static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
1544 struct igb_ring *tx_ring,
1547 union e1000_adv_rx_desc *rx_desc;
1548 struct igb_rx_buffer *rx_buffer_info;
1549 struct igb_tx_buffer *tx_buffer_info;
1550 u16 rx_ntc, tx_ntc, count = 0;
1552 /* initialize next to clean and descriptor values */
1553 rx_ntc = rx_ring->next_to_clean;
1554 tx_ntc = tx_ring->next_to_clean;
1555 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
1557 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
1558 /* check rx buffer */
1559 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1561 /* sync Rx buffer for CPU read */
1562 dma_sync_single_for_cpu(rx_ring->dev,
1563 rx_buffer_info->dma,
1564 #ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
1571 /* verify contents of skb */
1572 if (igb_check_lbtest_frame(rx_buffer_info, size))
1575 /* sync Rx buffer for device write */
1576 dma_sync_single_for_device(rx_ring->dev,
1577 rx_buffer_info->dma,
1578 #ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
1585 /* unmap buffer on tx side */
1586 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1587 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1589 /* increment rx/tx next to clean counters */
1591 if (rx_ntc == rx_ring->count)
1594 if (tx_ntc == tx_ring->count)
1597 /* fetch next descriptor */
1598 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
1601 /* re-map buffers to ring, store next to clean values */
1602 igb_alloc_rx_buffers(rx_ring, count);
1603 rx_ring->next_to_clean = rx_ntc;
1604 tx_ring->next_to_clean = tx_ntc;
1609 static int igb_run_loopback_test(struct igb_adapter *adapter)
1611 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1612 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1613 u16 i, j, lc, good_cnt;
1615 unsigned int size = IGB_RX_HDR_LEN;
1616 netdev_tx_t tx_ret_val;
1617 struct sk_buff *skb;
1619 /* allocate test skb */
1620 skb = alloc_skb(size, GFP_KERNEL);
1624 /* place data into test skb */
1625 igb_create_lbtest_frame(skb, size);
1629 * Calculate the loop count based on the largest descriptor ring
1630 * The idea is to wrap the largest ring a number of times using 64
1631 * send/receive pairs during each loop
1634 if (rx_ring->count <= tx_ring->count)
1635 lc = ((tx_ring->count / 64) * 2) + 1;
1637 lc = ((rx_ring->count / 64) * 2) + 1;
1639 for (j = 0; j <= lc; j++) { /* loop count loop */
1640 /* reset count of good packets */
1643 /* place 64 packets on the transmit queue*/
1644 for (i = 0; i < 64; i++) {
1646 tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
1647 if (tx_ret_val == NETDEV_TX_OK)
1651 if (good_cnt != 64) {
1656 /* allow 200 milliseconds for packets to go from tx to rx */
1659 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
1660 if (good_cnt != 64) {
1664 } /* end loop count loop */
1666 /* free the original skb */
1672 static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1674 /* PHY loopback cannot be performed if SoL/IDER
1675 * sessions are active */
1676 if (e1000_check_reset_block(&adapter->hw)) {
1677 dev_err(pci_dev_to_dev(adapter->pdev),
1678 "Cannot do PHY loopback test "
1679 "when SoL/IDER is active.\n");
1683 if (adapter->hw.mac.type == e1000_i354) {
1684 dev_info(&adapter->pdev->dev,
1685 "Loopback test not supported on i354.\n");
1689 *data = igb_setup_desc_rings(adapter);
1692 *data = igb_setup_loopback_test(adapter);
1695 *data = igb_run_loopback_test(adapter);
1697 igb_loopback_cleanup(adapter);
1700 igb_free_desc_rings(adapter);
1705 static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1712 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
1714 adapter->hw.mac.serdes_has_link = FALSE;
1716 /* On some blade server designs, link establishment
1717 * could take as long as 2-3 minutes */
1719 e1000_check_for_link(&adapter->hw);
1720 if (adapter->hw.mac.serdes_has_link)
1723 } while (i++ < 3750);
1727 for (i=0; i < IGB_MAX_LINK_TRIES; i++) {
1728 link = igb_has_link(adapter);
1743 static void igb_diag_test(struct net_device *netdev,
1744 struct ethtool_test *eth_test, u64 *data)
1746 struct igb_adapter *adapter = netdev_priv(netdev);
1747 u16 autoneg_advertised;
1748 u8 forced_speed_duplex, autoneg;
1749 bool if_running = netif_running(netdev);
1751 set_bit(__IGB_TESTING, &adapter->state);
1752 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1755 /* save speed, duplex, autoneg settings */
1756 autoneg_advertised = adapter->hw.phy.autoneg_advertised;
1757 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1758 autoneg = adapter->hw.mac.autoneg;
1760 dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
1762 /* power up link for link test */
1763 igb_power_up_link(adapter);
1765 /* Link test performed before hardware reset so autoneg doesn't
1766 * interfere with test result */
1767 if (igb_link_test(adapter, &data[4]))
1768 eth_test->flags |= ETH_TEST_FL_FAILED;
1771 /* indicate we're in test mode */
1776 if (igb_reg_test(adapter, &data[0]))
1777 eth_test->flags |= ETH_TEST_FL_FAILED;
1780 if (igb_eeprom_test(adapter, &data[1]))
1781 eth_test->flags |= ETH_TEST_FL_FAILED;
1784 if (igb_intr_test(adapter, &data[2]))
1785 eth_test->flags |= ETH_TEST_FL_FAILED;
1789 /* power up link for loopback test */
1790 igb_power_up_link(adapter);
1792 if (igb_loopback_test(adapter, &data[3]))
1793 eth_test->flags |= ETH_TEST_FL_FAILED;
1795 /* restore speed, duplex, autoneg settings */
1796 adapter->hw.phy.autoneg_advertised = autoneg_advertised;
1797 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
1798 adapter->hw.mac.autoneg = autoneg;
1800 /* force this routine to wait until autoneg complete/timeout */
1801 adapter->hw.phy.autoneg_wait_to_complete = TRUE;
1803 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
1805 clear_bit(__IGB_TESTING, &adapter->state);
1809 dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n");
1811 /* PHY is powered down when interface is down */
1812 if (if_running && igb_link_test(adapter, &data[4]))
1813 eth_test->flags |= ETH_TEST_FL_FAILED;
1817 /* Online tests aren't run; pass by default */
1823 clear_bit(__IGB_TESTING, &adapter->state);
1825 msleep_interruptible(4 * 1000);
1828 static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1830 struct igb_adapter *adapter = netdev_priv(netdev);
1832 wol->supported = WAKE_UCAST | WAKE_MCAST |
1833 WAKE_BCAST | WAKE_MAGIC |
1837 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
1840 /* apply any specific unsupported masks here */
1841 switch (adapter->hw.device_id) {
1846 if (adapter->wol & E1000_WUFC_EX)
1847 wol->wolopts |= WAKE_UCAST;
1848 if (adapter->wol & E1000_WUFC_MC)
1849 wol->wolopts |= WAKE_MCAST;
1850 if (adapter->wol & E1000_WUFC_BC)
1851 wol->wolopts |= WAKE_BCAST;
1852 if (adapter->wol & E1000_WUFC_MAG)
1853 wol->wolopts |= WAKE_MAGIC;
1854 if (adapter->wol & E1000_WUFC_LNKC)
1855 wol->wolopts |= WAKE_PHY;
1858 static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1860 struct igb_adapter *adapter = netdev_priv(netdev);
1862 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
1865 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
1866 return wol->wolopts ? -EOPNOTSUPP : 0;
1868 /* these settings will always override what we currently have */
1871 if (wol->wolopts & WAKE_UCAST)
1872 adapter->wol |= E1000_WUFC_EX;
1873 if (wol->wolopts & WAKE_MCAST)
1874 adapter->wol |= E1000_WUFC_MC;
1875 if (wol->wolopts & WAKE_BCAST)
1876 adapter->wol |= E1000_WUFC_BC;
1877 if (wol->wolopts & WAKE_MAGIC)
1878 adapter->wol |= E1000_WUFC_MAG;
1879 if (wol->wolopts & WAKE_PHY)
1880 adapter->wol |= E1000_WUFC_LNKC;
1881 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1886 /* bit defines for adapter->led_status */
1887 #ifdef HAVE_ETHTOOL_SET_PHYS_ID
1888 static int igb_set_phys_id(struct net_device *netdev,
1889 enum ethtool_phys_id_state state)
1891 struct igb_adapter *adapter = netdev_priv(netdev);
1892 struct e1000_hw *hw = &adapter->hw;
1895 case ETHTOOL_ID_ACTIVE:
1896 e1000_blink_led(hw);
1901 case ETHTOOL_ID_OFF:
1904 case ETHTOOL_ID_INACTIVE:
1906 e1000_cleanup_led(hw);
1913 static int igb_phys_id(struct net_device *netdev, u32 data)
1915 struct igb_adapter *adapter = netdev_priv(netdev);
1916 struct e1000_hw *hw = &adapter->hw;
1917 unsigned long timeout;
1919 timeout = data * 1000;
1922 * msleep_interruptable only accepts unsigned int so we are limited
1923 * in how long a duration we can wait
1925 if (!timeout || timeout > UINT_MAX)
1928 e1000_blink_led(hw);
1929 msleep_interruptible(timeout);
1932 e1000_cleanup_led(hw);
1936 #endif /* HAVE_ETHTOOL_SET_PHYS_ID */
1938 static int igb_set_coalesce(struct net_device *netdev,
1939 struct ethtool_coalesce *ec)
1941 struct igb_adapter *adapter = netdev_priv(netdev);
1944 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1945 ((ec->rx_coalesce_usecs > 3) &&
1946 (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1947 (ec->rx_coalesce_usecs == 2))
1949 printk("set_coalesce:invalid parameter..");
1953 if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1954 ((ec->tx_coalesce_usecs > 3) &&
1955 (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1956 (ec->tx_coalesce_usecs == 2))
1959 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
1962 if (ec->tx_max_coalesced_frames_irq)
1963 adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
1965 /* If ITR is disabled, disable DMAC */
1966 if (ec->rx_coalesce_usecs == 0) {
1967 adapter->dmac = IGB_DMAC_DISABLE;
1970 /* convert to rate of irq's per second */
1971 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
1972 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
1974 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
1976 /* convert to rate of irq's per second */
1977 if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
1978 adapter->tx_itr_setting = adapter->rx_itr_setting;
1979 else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
1980 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
1982 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
1984 for (i = 0; i < adapter->num_q_vectors; i++) {
1985 struct igb_q_vector *q_vector = adapter->q_vector[i];
1986 q_vector->tx.work_limit = adapter->tx_work_limit;
1987 if (q_vector->rx.ring)
1988 q_vector->itr_val = adapter->rx_itr_setting;
1990 q_vector->itr_val = adapter->tx_itr_setting;
1991 if (q_vector->itr_val && q_vector->itr_val <= 3)
1992 q_vector->itr_val = IGB_START_ITR;
1993 q_vector->set_itr = 1;
1999 static int igb_get_coalesce(struct net_device *netdev,
2000 struct ethtool_coalesce *ec)
2002 struct igb_adapter *adapter = netdev_priv(netdev);
2004 if (adapter->rx_itr_setting <= 3)
2005 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2007 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2009 ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
2011 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
2012 if (adapter->tx_itr_setting <= 3)
2013 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2015 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2021 static int igb_nway_reset(struct net_device *netdev)
2023 struct igb_adapter *adapter = netdev_priv(netdev);
2024 if (netif_running(netdev))
2025 igb_reinit_locked(adapter);
2029 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
2030 static int igb_get_sset_count(struct net_device *netdev, int sset)
2034 return IGB_STATS_LEN;
2036 return IGB_TEST_LEN;
2042 static int igb_get_stats_count(struct net_device *netdev)
2044 return IGB_STATS_LEN;
2047 static int igb_diag_test_count(struct net_device *netdev)
2049 return IGB_TEST_LEN;
2053 static void igb_get_ethtool_stats(struct net_device *netdev,
2054 struct ethtool_stats *stats, u64 *data)
2056 struct igb_adapter *adapter = netdev_priv(netdev);
2057 #ifdef HAVE_NETDEV_STATS_IN_NETDEV
2058 struct net_device_stats *net_stats = &netdev->stats;
2060 struct net_device_stats *net_stats = &adapter->net_stats;
2066 igb_update_stats(adapter);
2068 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
2069 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
2070 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
2071 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2073 for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
2074 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
2075 data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
2076 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2078 for (j = 0; j < adapter->num_tx_queues; j++) {
2079 queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
2080 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
2081 data[i] = queue_stat[k];
2083 for (j = 0; j < adapter->num_rx_queues; j++) {
2084 queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
2085 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
2086 data[i] = queue_stat[k];
2090 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2092 struct igb_adapter *adapter = netdev_priv(netdev);
2096 switch (stringset) {
2098 memcpy(data, *igb_gstrings_test,
2099 IGB_TEST_LEN*ETH_GSTRING_LEN);
2102 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
2103 memcpy(p, igb_gstrings_stats[i].stat_string,
2105 p += ETH_GSTRING_LEN;
2107 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
2108 memcpy(p, igb_gstrings_net_stats[i].stat_string,
2110 p += ETH_GSTRING_LEN;
2112 for (i = 0; i < adapter->num_tx_queues; i++) {
2113 sprintf(p, "tx_queue_%u_packets", i);
2114 p += ETH_GSTRING_LEN;
2115 sprintf(p, "tx_queue_%u_bytes", i);
2116 p += ETH_GSTRING_LEN;
2117 sprintf(p, "tx_queue_%u_restart", i);
2118 p += ETH_GSTRING_LEN;
2120 for (i = 0; i < adapter->num_rx_queues; i++) {
2121 sprintf(p, "rx_queue_%u_packets", i);
2122 p += ETH_GSTRING_LEN;
2123 sprintf(p, "rx_queue_%u_bytes", i);
2124 p += ETH_GSTRING_LEN;
2125 sprintf(p, "rx_queue_%u_drops", i);
2126 p += ETH_GSTRING_LEN;
2127 sprintf(p, "rx_queue_%u_csum_err", i);
2128 p += ETH_GSTRING_LEN;
2129 sprintf(p, "rx_queue_%u_alloc_failed", i);
2130 p += ETH_GSTRING_LEN;
2131 sprintf(p, "rx_queue_%u_ipv4_packets", i);
2132 p += ETH_GSTRING_LEN;
2133 sprintf(p, "rx_queue_%u_ipv4e_packets", i);
2134 p += ETH_GSTRING_LEN;
2135 sprintf(p, "rx_queue_%u_ipv6_packets", i);
2136 p += ETH_GSTRING_LEN;
2137 sprintf(p, "rx_queue_%u_ipv6e_packets", i);
2138 p += ETH_GSTRING_LEN;
2139 sprintf(p, "rx_queue_%u_tcp_packets", i);
2140 p += ETH_GSTRING_LEN;
2141 sprintf(p, "rx_queue_%u_udp_packets", i);
2142 p += ETH_GSTRING_LEN;
2143 sprintf(p, "rx_queue_%u_sctp_packets", i);
2144 p += ETH_GSTRING_LEN;
2145 sprintf(p, "rx_queue_%u_nfs_packets", i);
2146 p += ETH_GSTRING_LEN;
2148 /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
#ifdef HAVE_ETHTOOL_GET_TS_INFO
static int igb_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct igb_adapter *adapter = netdev_priv(dev);

	switch (adapter->hw.mac.type) {
#ifdef HAVE_PTP_1588_CLOCK
	case e1000_82575:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		return 0;
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE |
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		if (adapter->ptp_clock)
			info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;

		/* 82576 does not support timestamping all packets. */
		if (adapter->hw.mac.type >= e1000_82580)
			info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
		else
			info->rx_filters |=
				(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
				(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
				(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);

		return 0;
#endif /* HAVE_PTP_1588_CLOCK */
	default:
		return -EOPNOTSUPP;
	}
}
#endif /* HAVE_ETHTOOL_GET_TS_INFO */
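
/*
 * Userspace can query the capabilities reported by igb_get_ts_info() with
 * "ethtool -T eth0", which prints the supported SOF_TIMESTAMPING_* modes,
 * the PTP hardware clock index and the accepted tx/rx filter values.
 */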
#ifdef CONFIG_PM_RUNTIME
static int igb_ethtool_begin(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_sync(&adapter->pdev->dev);
	return 0;
}

static void igb_ethtool_complete(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	pm_runtime_put(&adapter->pdev->dev);
}
#endif /* CONFIG_PM_RUNTIME */
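
/*
 * The begin/complete hooks bracket every ethtool operation: the device is
 * resumed and kept awake via pm_runtime_get_sync() before any register
 * access, and allowed to runtime-suspend again once the ioctl completes.
 */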
#ifndef HAVE_NDO_SET_FEATURES
static u32 igb_get_rx_csum(struct net_device *netdev)
{
	return !!(netdev->features & NETIF_F_RXCSUM);
}

static int igb_set_rx_csum(struct net_device *netdev, u32 data)
{
	const u32 feature_list = NETIF_F_RXCSUM;

	if (data)
		netdev->features |= feature_list;
	else
		netdev->features &= ~feature_list;

	return 0;
}

static int igb_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
#ifdef NETIF_F_IPV6_CSUM
	u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
#else
	u32 feature_list = NETIF_F_IP_CSUM;
#endif

	if (adapter->hw.mac.type >= e1000_82576)
		feature_list |= NETIF_F_SCTP_CSUM;

	if (data)
		netdev->features |= feature_list;
	else
		netdev->features &= ~feature_list;

	return 0;
}
#ifdef NETIF_F_TSO
static int igb_set_tso(struct net_device *netdev, u32 data)
{
#ifdef NETIF_F_TSO6
	const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
#else
	const u32 feature_list = NETIF_F_TSO;
#endif

	if (data)
		netdev->features |= feature_list;
	else
		netdev->features &= ~feature_list;

#ifndef HAVE_NETDEV_VLAN_FEATURES
	if (!data) {
		struct igb_adapter *adapter = netdev_priv(netdev);
		struct net_device *v_netdev;
		int i;

		/* disable TSO on all VLANs if they're present */
		if (!adapter->vlgrp)
			goto tso_out;

		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			v_netdev = vlan_group_get_device(adapter->vlgrp, i);
			if (!v_netdev)
				continue;

			v_netdev->features &= ~feature_list;
			vlan_group_set_device(adapter->vlgrp, i, v_netdev);
		}
	}

tso_out:

#endif /* HAVE_NETDEV_VLAN_FEATURES */
	return 0;
}
#endif /* NETIF_F_TSO */
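
/*
 * The get/set callbacks above exist only for kernels without
 * ndo_set_features; on newer kernels the networking core manipulates
 * netdev->features directly and these legacy ethtool hooks are unused.
 */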
#ifdef ETHTOOL_GFLAGS
static int igb_set_flags(struct net_device *netdev, u32 data)
{
	u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
			      ETH_FLAG_RXHASH;
#ifndef HAVE_VLAN_RX_REGISTER
	u32 changed = netdev->features ^ data;
#endif
	int rc;
#ifndef IGB_NO_LRO

	supported_flags |= ETH_FLAG_LRO;
#endif
	/*
	 * Since there is no support for enabling Tx VLAN accel
	 * separately, make sure the Tx flag is cleared if Rx is.
	 */
	if (!(data & ETH_FLAG_RXVLAN))
		data &= ~ETH_FLAG_TXVLAN;

	rc = ethtool_op_set_flags(netdev, data, supported_flags);
	if (rc)
		return rc;
#ifndef HAVE_VLAN_RX_REGISTER

	if (changed & ETH_FLAG_RXVLAN)
		igb_vlan_mode(netdev, data);
#endif

	return 0;
}
#endif /* ETHTOOL_GFLAGS */
#endif /* HAVE_NDO_SET_FEATURES */
#ifdef ETHTOOL_SADV_COAL
static int igb_set_adv_coal(struct net_device *netdev,
			    struct ethtool_value *edata)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	switch (edata->data) {
	/* all supported DMA coalescing watermarks, defined in igb.h */
	case IGB_DMAC_DISABLE:
	case IGB_DMAC_MIN:
	case IGB_DMAC_500:
	case IGB_DMAC_EN_DEFAULT:
	case IGB_DMAC_2000:
	case IGB_DMAC_3000:
	case IGB_DMAC_4000:
	case IGB_DMAC_5000:
	case IGB_DMAC_6000:
	case IGB_DMAC_7000:
	case IGB_DMAC_8000:
	case IGB_DMAC_9000:
	case IGB_DMAC_MAX:
		adapter->dmac = edata->data;
		break;
	default:
		adapter->dmac = IGB_DMAC_DISABLE;
		printk(KERN_WARNING
		       "set_dmac: invalid setting, setting DMAC to %d\n",
		       adapter->dmac);
	}
	printk(KERN_INFO "%s: setting DMAC to %d\n",
	       netdev->name, adapter->dmac);
	return 0;
}
#endif /* ETHTOOL_SADV_COAL */
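
/*
 * The IGB_DMAC_* values accepted above are DMA-coalescing watermark
 * settings; ETHTOOL_SADV_COAL/ETHTOOL_GADV_COAL are private extensions
 * carried by this out-of-tree driver, not standard ethtool commands, so
 * these callbacks compile only when the matching kcompat macros are set.
 */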
#ifdef ETHTOOL_GADV_COAL
static void igb_get_dmac(struct net_device *netdev,
			 struct ethtool_value *edata)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	edata->data = adapter->dmac;
}
#endif /* ETHTOOL_GADV_COAL */
#ifdef ETHTOOL_GEEE
static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ret_val;
	u16 phy_data;

	if ((hw->mac.type < e1000_i350) ||
	    (hw->phy.media_type != e1000_media_type_copper))
		return -EOPNOTSUPP;

	edata->supported = (SUPPORTED_1000baseT_Full |
			    SUPPORTED_100baseT_Full);

	if (!hw->dev_spec._82575.eee_disable)
		edata->advertised =
			mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);

	/* The IPCNFG and EEER registers are not supported on I354. */
	if (hw->mac.type == e1000_i354) {
		e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active);
	} else {
		u32 eeer = E1000_READ_REG(hw, E1000_EEER);

		/* EEE status on negotiated link */
		if (eeer & E1000_EEER_EEE_NEG)
			edata->eee_active = true;

		if (eeer & E1000_EEER_TX_LPI_EN)
			edata->tx_lpi_enabled = true;
	}

	/* EEE Link Partner Advertised */
	switch (hw->mac.type) {
	case e1000_i350:
		ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
					     &phy_data);
		if (ret_val)
			return -ENODATA;

		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
		break;
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
					       E1000_EEE_LP_ADV_DEV_I210,
					       &phy_data);
		if (ret_val)
			return -ENODATA;

		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
		break;
	default:
		break;
	}

	edata->eee_enabled = !hw->dev_spec._82575.eee_disable;

	if ((hw->mac.type == e1000_i354) &&
	    (edata->eee_enabled))
		edata->tx_lpi_enabled = true;

	/*
	 * Report correct negotiated EEE status for devices that
	 * wrongly report EEE at half-duplex.
	 */
	if (adapter->link_duplex == HALF_DUPLEX) {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->advertised = 0;
	}

	return 0;
}
#endif /* ETHTOOL_GEEE */
#ifdef ETHTOOL_SEEE
static int igb_set_eee(struct net_device *netdev,
		       struct ethtool_eee *edata)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct ethtool_eee eee_curr;
	s32 ret_val;

	if ((hw->mac.type < e1000_i350) ||
	    (hw->phy.media_type != e1000_media_type_copper))
		return -EOPNOTSUPP;

	ret_val = igb_get_eee(netdev, &eee_curr);
	if (ret_val)
		return ret_val;

	if (eee_curr.eee_enabled) {
		if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
			dev_err(pci_dev_to_dev(adapter->pdev),
				"Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		/* Tx LPI time is not implemented currently */
		if (edata->tx_lpi_timer) {
			dev_err(pci_dev_to_dev(adapter->pdev),
				"Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (edata->advertised &
		    ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
			dev_err(pci_dev_to_dev(adapter->pdev),
				"EEE advertisement supports only 100Tx and/or 1000T full duplex\n");
			return -EINVAL;
		}
	} else if (!edata->eee_enabled) {
		dev_err(pci_dev_to_dev(adapter->pdev),
			"Setting EEE options is not supported with EEE disabled\n");
		return -EINVAL;
	}

	adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);

	if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
		hw->dev_spec._82575.eee_disable = !edata->eee_enabled;

		/* reset the link */
		if (netif_running(netdev))
			igb_reinit_locked(adapter);
		else
			igb_reset(adapter);
	}

	return 0;
}
#endif /* ETHTOOL_SEEE */
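
/*
 * Typical usage, assuming an i350 or later copper port ("eth0" is a
 * placeholder interface name):
 *
 *	ethtool --show-eee eth0
 *	ethtool --set-eee eth0 eee on
 *
 * igb_set_eee() only toggles enablement and advertisement; tx-lpi and the
 * LPI timer are rejected above as unsupported.
 */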
#ifdef ETHTOOL_GRXRINGS
static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
				 struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on igb */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V4_FLOW:
		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V6_FLOW:
		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
			 void *rule_locs)
#else
			 u32 *rule_locs)
#endif
{
	struct igb_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = igb_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
		       IGB_FLAG_RSS_FIELD_IPV6_UDP)
static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
				struct ethtool_rxnfc *nfc)
{
	u32 flags = adapter->flags;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags != adapter->flags) {
		struct e1000_hw *hw = &adapter->hw;
		u32 mrqc = E1000_READ_REG(hw, E1000_MRQC);

		if ((flags & UDP_RSS_FLAGS) &&
		    !(adapter->flags & UDP_RSS_FLAGS))
			DPRINTK(DRV, WARNING,
				"enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags = flags;

		/* Perform hash on these packet types */
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
			E1000_MRQC_RSS_FIELD_IPV4_TCP |
			E1000_MRQC_RSS_FIELD_IPV6 |
			E1000_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
			  E1000_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
			mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
			mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;

		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
	}

	return 0;
}
static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct igb_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = igb_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
#endif /* ETHTOOL_GRXRINGS */
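
/*
 * Example: hashing UDP/IPv4 flows on ports as well as addresses, which
 * sets IGB_FLAG_RSS_FIELD_IPV4_UDP via igb_set_rss_hash_opt() above
 * ("eth0" is a placeholder interface name):
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 *
 * "sdfn" selects src-ip, dst-ip, src-port and dst-port; note the
 * out-of-order warning above for fragmented UDP datagrams.
 */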
static const struct ethtool_ops igb_ethtool_ops = {
	.get_settings = igb_get_settings,
	.set_settings = igb_set_settings,
	.get_drvinfo = igb_get_drvinfo,
	.get_regs_len = igb_get_regs_len,
	.get_regs = igb_get_regs,
	.get_wol = igb_get_wol,
	.set_wol = igb_set_wol,
	.get_msglevel = igb_get_msglevel,
	.set_msglevel = igb_set_msglevel,
	.nway_reset = igb_nway_reset,
	.get_link = igb_get_link,
	.get_eeprom_len = igb_get_eeprom_len,
	.get_eeprom = igb_get_eeprom,
	.set_eeprom = igb_set_eeprom,
	.get_ringparam = igb_get_ringparam,
	.set_ringparam = igb_set_ringparam,
	.get_pauseparam = igb_get_pauseparam,
	.set_pauseparam = igb_set_pauseparam,
	.self_test = igb_diag_test,
	.get_strings = igb_get_strings,
#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
	.set_phys_id = igb_set_phys_id,
#else
	.phys_id = igb_phys_id,
#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
	.get_sset_count = igb_get_sset_count,
#else
	.get_stats_count = igb_get_stats_count,
	.self_test_count = igb_diag_test_count,
#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
	.get_ethtool_stats = igb_get_ethtool_stats,
#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
	.get_perm_addr = ethtool_op_get_perm_addr,
#endif /* HAVE_ETHTOOL_GET_PERM_ADDR */
	.get_coalesce = igb_get_coalesce,
	.set_coalesce = igb_set_coalesce,
#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
#ifdef HAVE_ETHTOOL_GET_TS_INFO
	.get_ts_info = igb_get_ts_info,
#endif /* HAVE_ETHTOOL_GET_TS_INFO */
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#ifdef CONFIG_PM_RUNTIME
	.begin = igb_ethtool_begin,
	.complete = igb_ethtool_complete,
#endif /* CONFIG_PM_RUNTIME */
#ifndef HAVE_NDO_SET_FEATURES
	.get_rx_csum = igb_get_rx_csum,
	.set_rx_csum = igb_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = igb_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = igb_set_tso,
#endif /* NETIF_F_TSO */
#ifdef ETHTOOL_GFLAGS
	.get_flags = ethtool_op_get_flags,
	.set_flags = igb_set_flags,
#endif /* ETHTOOL_GFLAGS */
#endif /* HAVE_NDO_SET_FEATURES */
#ifdef ETHTOOL_GADV_COAL
	/* must match the igb_get_dmac()/igb_set_adv_coal() definitions above */
	.get_advcoal = igb_get_dmac,
	.set_advcoal = igb_set_adv_coal,
#endif /* ETHTOOL_GADV_COAL */
#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
#ifdef ETHTOOL_GEEE
	.get_eee = igb_get_eee,
#endif /* ETHTOOL_GEEE */
#ifdef ETHTOOL_SEEE
	.set_eee = igb_set_eee,
#endif /* ETHTOOL_SEEE */
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#ifdef ETHTOOL_GRXRINGS
	.get_rxnfc = igb_get_rxnfc,
	.set_rxnfc = igb_set_rxnfc,
#endif /* ETHTOOL_GRXRINGS */
};
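
/*
 * On RHEL6 kernels the ethtool_ops structure is frozen for kABI, so the
 * newer callbacks (get_ts_info, set_phys_id, EEE) are attached through the
 * ethtool_ops_ext side structure below instead of the main ops table.
 */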
#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
static const struct ethtool_ops_ext igb_ethtool_ops_ext = {
	.size = sizeof(struct ethtool_ops_ext),
	.get_ts_info = igb_get_ts_info,
	.set_phys_id = igb_set_phys_id,
	.get_eee = igb_get_eee,
	.set_eee = igb_set_eee,
};

void igb_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
	set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext);
}
#else
void igb_set_ethtool_ops(struct net_device *netdev)
{
	/* have to "undeclare" const on this struct to remove warnings */
	SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
}
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#endif /* SIOCETHTOOL */