kni: initial import
[dpdk.git] / lib / librte_eal / linuxapp / kni / ethtool / igb / igb_ethtool.c
1 /*******************************************************************************
2
3   Intel(R) Gigabit Ethernet Linux driver
4   Copyright(c) 2007-2012 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28 /* ethtool support for igb */
29
30 #include <linux/netdevice.h>
31 #include <linux/vmalloc.h>
32
33 #ifdef SIOCETHTOOL
34 #include <linux/ethtool.h>
35 #ifdef CONFIG_PM_RUNTIME
36 #include <linux/pm_runtime.h>
37 #endif /* CONFIG_PM_RUNTIME */
38
39 #include "igb.h"
40 #include "igb_regtest.h"
41 #include <linux/if_vlan.h>
42
43 #ifdef ETHTOOL_OPS_COMPAT
44 #include "kcompat_ethtool.c"
45 #endif
46 #ifdef ETHTOOL_GSTATS
/* Per-statistic metadata: the string shown by ethtool plus the size and
 * byte offset of the backing counter inside struct igb_adapter. */
struct igb_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Build an igb_stats entry for a field of struct igb_adapter. */
#define IGB_STAT(_name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
	.stat_offset = offsetof(struct igb_adapter, _stat) \
}
/* Adapter-level statistics exported through ETHTOOL_GSTATS.  Note some
 * hardware counters back more than one ethtool string (e.g. stats.mprc
 * feeds both "rx_multicast" and "multicast"). */
static const struct igb_stats igb_gstrings_stats[] = {
	IGB_STAT("rx_packets", stats.gprc),
	IGB_STAT("tx_packets", stats.gptc),
	IGB_STAT("rx_bytes", stats.gorc),
	IGB_STAT("tx_bytes", stats.gotc),
	IGB_STAT("rx_broadcast", stats.bprc),
	IGB_STAT("tx_broadcast", stats.bptc),
	IGB_STAT("rx_multicast", stats.mprc),
	IGB_STAT("tx_multicast", stats.mptc),
	IGB_STAT("multicast", stats.mprc),
	IGB_STAT("collisions", stats.colc),
	IGB_STAT("rx_crc_errors", stats.crcerrs),
	IGB_STAT("rx_no_buffer_count", stats.rnbc),
	IGB_STAT("rx_missed_errors", stats.mpc),
	IGB_STAT("tx_aborted_errors", stats.ecol),
	IGB_STAT("tx_carrier_errors", stats.tncrs),
	IGB_STAT("tx_window_errors", stats.latecol),
	IGB_STAT("tx_abort_late_coll", stats.latecol),
	IGB_STAT("tx_deferred_ok", stats.dc),
	IGB_STAT("tx_single_coll_ok", stats.scc),
	IGB_STAT("tx_multi_coll_ok", stats.mcc),
	IGB_STAT("tx_timeout_count", tx_timeout_count),
	IGB_STAT("rx_long_length_errors", stats.roc),
	IGB_STAT("rx_short_length_errors", stats.ruc),
	IGB_STAT("rx_align_errors", stats.algnerrc),
	IGB_STAT("tx_tcp_seg_good", stats.tsctc),
	IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
	IGB_STAT("rx_flow_control_xon", stats.xonrxc),
	IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
	IGB_STAT("tx_flow_control_xon", stats.xontxc),
	IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
	IGB_STAT("rx_long_byte_count", stats.gorc),
	IGB_STAT("tx_dma_out_of_sync", stats.doosync),
#ifndef IGB_NO_LRO
	IGB_STAT("lro_aggregated", lro_stats.coal),
	IGB_STAT("lro_flushed", lro_stats.flushed),
	IGB_STAT("lro_recycled", lro_stats.recycled),
#endif /* IGB_NO_LRO */
	IGB_STAT("tx_smbus", stats.mgptc),
	IGB_STAT("rx_smbus", stats.mgprc),
	IGB_STAT("dropped_smbus", stats.mgpdc),
	IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
	IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
};
104
/* Build an igb_stats entry for a field of struct net_device_stats; the
 * field name itself is used as the ethtool string. */
#define IGB_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
/* Statistics taken from the generic netdev counters rather than from
 * adapter-private hardware counters. */
static const struct igb_stats igb_gstrings_net_stats[] = {
	IGB_NETDEV_STAT(rx_errors),
	IGB_NETDEV_STAT(tx_errors),
	IGB_NETDEV_STAT(tx_dropped),
	IGB_NETDEV_STAT(rx_length_errors),
	IGB_NETDEV_STAT(rx_over_errors),
	IGB_NETDEV_STAT(rx_frame_errors),
	IGB_NETDEV_STAT(rx_fifo_errors),
	IGB_NETDEV_STAT(tx_fifo_errors),
	IGB_NETDEV_STAT(tx_heartbeat_errors)
};
121
#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats)
#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats)
/* Number of u64 counters kept per RX/TX queue. */
#define IGB_RX_QUEUE_STATS_LEN \
	(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
#define IGB_TX_QUEUE_STATS_LEN \
	(sizeof(struct igb_tx_queue_stats) / sizeof(u64))
/* NOTE: only valid in a scope where "netdev" names the queried device;
 * the total depends on the runtime queue counts. */
#define IGB_QUEUE_STATS_LEN \
	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
	  IGB_RX_QUEUE_STATS_LEN) + \
	 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
	  IGB_TX_QUEUE_STATS_LEN))
#define IGB_STATS_LEN \
	(IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)

#endif /* ETHTOOL_GSTATS */
#ifdef ETHTOOL_TEST
/* Names of the ethtool self-tests, in the order they are executed. */
static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */
145
146 static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
147 {
148         struct igb_adapter *adapter = netdev_priv(netdev);
149         struct e1000_hw *hw = &adapter->hw;
150         u32 status;
151
152         if (hw->phy.media_type == e1000_media_type_copper) {
153
154                 ecmd->supported = (SUPPORTED_10baseT_Half |
155                                    SUPPORTED_10baseT_Full |
156                                    SUPPORTED_100baseT_Half |
157                                    SUPPORTED_100baseT_Full |
158                                    SUPPORTED_1000baseT_Full|
159                                    SUPPORTED_Autoneg |
160                                    SUPPORTED_TP);
161                 ecmd->advertising = (ADVERTISED_TP |
162                                      ADVERTISED_Pause);
163
164                 if (hw->mac.autoneg == 1) {
165                         ecmd->advertising |= ADVERTISED_Autoneg;
166                         /* the e1000 autoneg seems to match ethtool nicely */
167                         ecmd->advertising |= hw->phy.autoneg_advertised;
168                 }
169
170                 ecmd->port = PORT_TP;
171                 ecmd->phy_address = hw->phy.addr;
172         } else {
173                 ecmd->supported   = (SUPPORTED_1000baseT_Full |
174                                      SUPPORTED_FIBRE |
175                                      SUPPORTED_Autoneg);
176
177                 ecmd->advertising = (ADVERTISED_1000baseT_Full |
178                                      ADVERTISED_FIBRE |
179                                      ADVERTISED_Autoneg |
180                                      ADVERTISED_Pause);
181
182                 ecmd->port = PORT_FIBRE;
183         } 
184
185         ecmd->transceiver = XCVR_INTERNAL;
186
187         status = E1000_READ_REG(hw, E1000_STATUS);
188
189         if (status & E1000_STATUS_LU) {
190
191                 if ((status & E1000_STATUS_SPEED_1000) ||
192                     hw->phy.media_type != e1000_media_type_copper)
193                         ecmd->speed = SPEED_1000;
194                 else if (status & E1000_STATUS_SPEED_100)
195                         ecmd->speed = SPEED_100;
196                 else
197                         ecmd->speed = SPEED_10;
198
199                 if ((status & E1000_STATUS_FD) ||
200                     hw->phy.media_type != e1000_media_type_copper)
201                         ecmd->duplex = DUPLEX_FULL;
202                 else
203                         ecmd->duplex = DUPLEX_HALF;
204         } else {
205                 ecmd->speed = -1;
206                 ecmd->duplex = -1;
207         }
208
209         ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
210 #ifdef ETH_TP_MDI_X
211
212         /* MDI-X => 2; MDI =>1; Invalid =>0 */
213         if ((hw->phy.media_type == e1000_media_type_copper) &&
214             netif_carrier_ok(netdev))
215                 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
216                                                       ETH_TP_MDI;
217         else
218                 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
219
220 #endif /* ETH_TP_MDI_X */
221         return 0;
222 }
223
/**
 * igb_set_settings - apply link settings requested via ethtool
 * @netdev: network interface device
 * @ecmd: requested autoneg/speed/duplex (and MDI-X mode, when supported)
 *
 * Returns 0 on success, -EINVAL when the link is locked by an active
 * SoL/IDER session or the forced speed/duplex combination is rejected.
 */
static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed */
	if (e1000_check_reset_block(hw)) {
		dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link "
			"characteristics when SoL/IDER is active.\n");
		return -EINVAL;
	}

	/* serialize against any in-flight reset before touching the link */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		hw->phy.autoneg_advertised = ecmd->advertising |
					     ADVERTISED_TP |
					     ADVERTISED_Autoneg;
		/* echo back what will actually be advertised */
		ecmd->advertising = hw->phy.autoneg_advertised;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = e1000_fc_default;
	} else {
		/* forced mode: igb_set_spd_dplx() takes speed+duplex summed */
		if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
			clear_bit(__IGB_RESETTING, &adapter->state);
			return -EINVAL;
		}
	}

#ifdef ETH_TP_MDI_X
	/* MDI-X =>2; MDI=>1; Invalid =>0 */
	if (hw->phy.media_type == e1000_media_type_copper) {
		switch (ecmd->eth_tp_mdix) {
		case ETH_TP_MDI_X:
			hw->phy.mdix = 2;
			break;
		case ETH_TP_MDI:
			hw->phy.mdix = 1;
			break;
		case ETH_TP_MDI_INVALID:
		default:
			hw->phy.mdix = 0;
			break;
		}
	}

#endif /* ETH_TP_MDI_X */
	/* reset the link */
	if (netif_running(adapter->netdev)) {
		igb_down(adapter);
		igb_up(adapter);
	} else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);
	return 0;
}
283
284 static u32 igb_get_link(struct net_device *netdev)
285 {
286         struct igb_adapter *adapter = netdev_priv(netdev);
287         struct e1000_mac_info *mac = &adapter->hw.mac;
288
289         /*
290          * If the link is not reported up to netdev, interrupts are disabled,
291          * and so the physical link state may have changed since we last
292          * looked. Set get_link_status to make sure that the true link
293          * state is interrogated, rather than pulling a cached and possibly
294          * stale link state from the driver.
295          */
296         if (!netif_carrier_ok(netdev))
297                 mac->get_link_status = 1;
298
299         return igb_has_link(adapter);
300 }
301
302 static void igb_get_pauseparam(struct net_device *netdev,
303                                struct ethtool_pauseparam *pause)
304 {
305         struct igb_adapter *adapter = netdev_priv(netdev);
306         struct e1000_hw *hw = &adapter->hw;
307
308         pause->autoneg =
309                 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
310
311         if (hw->fc.current_mode == e1000_fc_rx_pause)
312                 pause->rx_pause = 1;
313         else if (hw->fc.current_mode == e1000_fc_tx_pause)
314                 pause->tx_pause = 1;
315         else if (hw->fc.current_mode == e1000_fc_full) {
316                 pause->rx_pause = 1;
317                 pause->tx_pause = 1;
318         }
319 }
320
/**
 * igb_set_pauseparam - apply flow-control configuration from ethtool
 * @netdev: network interface device
 * @pause: requested autoneg and rx/tx pause flags
 *
 * With autoneg enabled the link is renegotiated; otherwise the requested
 * mode is forced immediately.  Returns 0 on success or the error from the
 * hardware setup call.
 */
static int igb_set_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	/* serialize against any in-flight reset before touching the link */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		hw->fc.requested_mode = e1000_fc_default;
		/* renegotiate so the peer learns the new pause settings */
		if (netif_running(adapter->netdev)) {
			igb_down(adapter);
			igb_up(adapter);
		} else {
			igb_reset(adapter);
		}
	} else {
		/* translate the rx/tx flag pair into a fc mode */
		if (pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_full;
		else if (pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_rx_pause;
		else if (!pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_tx_pause;
		else if (!pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_none;

		hw->fc.current_mode = hw->fc.requested_mode;

		/* copper can force MAC flow control directly; serdes needs
		 * a full link setup */
		retval = ((hw->phy.media_type == e1000_media_type_copper) ?
			  e1000_force_mac_fc(hw) : hw->mac.ops.setup_link(hw));
	}

	clear_bit(__IGB_RESETTING, &adapter->state);
	return retval;
}
360
361 static u32 igb_get_msglevel(struct net_device *netdev)
362 {
363         struct igb_adapter *adapter = netdev_priv(netdev);
364         return adapter->msg_enable;
365 }
366
367 static void igb_set_msglevel(struct net_device *netdev, u32 data)
368 {
369         struct igb_adapter *adapter = netdev_priv(netdev);
370         adapter->msg_enable = data;
371 }
372
/* Size in bytes of the register dump produced by igb_get_regs().
 * IGB_REGS_LEN is also used there to clear the output buffer. */
static int igb_get_regs_len(struct net_device *netdev)
{
#define IGB_REGS_LEN 555
	return IGB_REGS_LEN * sizeof(u32);
}
378
/**
 * igb_get_regs - produce a register dump for ethtool -d
 * @netdev: network interface device
 * @regs: ethtool regs header; version is derived from revision/device id
 * @p: output buffer of IGB_REGS_LEN u32 slots (pre-zeroed below)
 *
 * Fills fixed slots with general/interrupt/flow-control/rx/tx/wake-up/PCS
 * registers, copies the software statistics snapshot, then dumps the
 * per-queue and filter register arrays at fixed base indices.  Slots not
 * written here (e.g. 85, 87, 97, 99, 115, 117) stay zero — presumably
 * placeholders for the upper halves of 64-bit counters; layout must not
 * change without updating consumers of the dump.
 */
static void igb_get_regs(struct net_device *netdev,
			 struct ethtool_regs *regs, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IGB_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
	regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
	regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
	regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
	regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
	regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
	regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
	regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
	regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
	regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
	regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
	regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);

	/* NVM Register */
	regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);

	/* Interrupt */
	/* Reading EICS for EICR because they read the
	 * same but EICS does not clear on read */
	regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
	regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
	regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
	regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
	regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
	regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
	/* Reading ICS for ICR because they read the
	 * same but ICS does not clear on read */
	regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
	regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
	regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
	regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
	regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
	regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
	regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);

	/* Flow Control */
	regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
	regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
	regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
	regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
	regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
	regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);

	/* Receive */
	regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
	regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
	regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
	regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
	regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
	regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);

	/* Transmit */
	regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
	regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
	regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
	regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);

	/* Wake Up */
	regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
	regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
	regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
	regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
	regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);

	/* MAC */
	regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
	regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
	regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
	regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
	regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
	regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
	regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);

	/* Statistics — taken from the software snapshot, not the hardware */
	regs_buff[54] = adapter->stats.crcerrs;
	regs_buff[55] = adapter->stats.algnerrc;
	regs_buff[56] = adapter->stats.symerrs;
	regs_buff[57] = adapter->stats.rxerrc;
	regs_buff[58] = adapter->stats.mpc;
	regs_buff[59] = adapter->stats.scc;
	regs_buff[60] = adapter->stats.ecol;
	regs_buff[61] = adapter->stats.mcc;
	regs_buff[62] = adapter->stats.latecol;
	regs_buff[63] = adapter->stats.colc;
	regs_buff[64] = adapter->stats.dc;
	regs_buff[65] = adapter->stats.tncrs;
	regs_buff[66] = adapter->stats.sec;
	regs_buff[67] = adapter->stats.htdpmc;
	regs_buff[68] = adapter->stats.rlec;
	regs_buff[69] = adapter->stats.xonrxc;
	regs_buff[70] = adapter->stats.xontxc;
	regs_buff[71] = adapter->stats.xoffrxc;
	regs_buff[72] = adapter->stats.xofftxc;
	regs_buff[73] = adapter->stats.fcruc;
	regs_buff[74] = adapter->stats.prc64;
	regs_buff[75] = adapter->stats.prc127;
	regs_buff[76] = adapter->stats.prc255;
	regs_buff[77] = adapter->stats.prc511;
	regs_buff[78] = adapter->stats.prc1023;
	regs_buff[79] = adapter->stats.prc1522;
	regs_buff[80] = adapter->stats.gprc;
	regs_buff[81] = adapter->stats.bprc;
	regs_buff[82] = adapter->stats.mprc;
	regs_buff[83] = adapter->stats.gptc;
	regs_buff[84] = adapter->stats.gorc;
	regs_buff[86] = adapter->stats.gotc;
	regs_buff[88] = adapter->stats.rnbc;
	regs_buff[89] = adapter->stats.ruc;
	regs_buff[90] = adapter->stats.rfc;
	regs_buff[91] = adapter->stats.roc;
	regs_buff[92] = adapter->stats.rjc;
	regs_buff[93] = adapter->stats.mgprc;
	regs_buff[94] = adapter->stats.mgpdc;
	regs_buff[95] = adapter->stats.mgptc;
	regs_buff[96] = adapter->stats.tor;
	regs_buff[98] = adapter->stats.tot;
	regs_buff[100] = adapter->stats.tpr;
	regs_buff[101] = adapter->stats.tpt;
	regs_buff[102] = adapter->stats.ptc64;
	regs_buff[103] = adapter->stats.ptc127;
	regs_buff[104] = adapter->stats.ptc255;
	regs_buff[105] = adapter->stats.ptc511;
	regs_buff[106] = adapter->stats.ptc1023;
	regs_buff[107] = adapter->stats.ptc1522;
	regs_buff[108] = adapter->stats.mptc;
	regs_buff[109] = adapter->stats.bptc;
	regs_buff[110] = adapter->stats.tsctc;
	regs_buff[111] = adapter->stats.iac;
	regs_buff[112] = adapter->stats.rpthc;
	regs_buff[113] = adapter->stats.hgptc;
	regs_buff[114] = adapter->stats.hgorc;
	regs_buff[116] = adapter->stats.hgotc;
	regs_buff[118] = adapter->stats.lenerrs;
	regs_buff[119] = adapter->stats.scvpc;
	regs_buff[120] = adapter->stats.hrmpc;

	/* per-queue RX registers, 4 queues each */
	for (i = 0; i < 4; i++)
		regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
	for (i = 0; i < 4; i++)
		regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));

	/* interrupt throttling and immediate-interrupt filters */
	for (i = 0; i < 10; i++)
		regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
	for (i = 0; i < 8; i++)
		regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
	for (i = 0; i < 16; i++)
		regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));

	/* per-queue TX registers, 4 queues each */
	for (i = 0; i < 4; i++)
		regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));

	/* wake-up filter tables */
	for (i = 0; i < 4; i++)
		regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
	for (i = 0; i < 4; i++)
		regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
	for (i = 0; i < 32; i++)
		regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
	for (i = 0; i < 128; i++)
		regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
	for (i = 0; i < 128; i++)
		regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
	for (i = 0; i < 4; i++)
		regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));

	regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
	regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
	regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
	regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
	/* OS2BMC counters exist only on parts newer than the 82580 */
	if (hw->mac.type > e1000_82580) {
		regs_buff[551] = adapter->stats.o2bgptc;
		regs_buff[552] = adapter->stats.b2ospc;
		regs_buff[553] = adapter->stats.o2bspc;
		regs_buff[554] = adapter->stats.b2ogprc;
	}
}
599
600 static int igb_get_eeprom_len(struct net_device *netdev)
601 {
602         struct igb_adapter *adapter = netdev_priv(netdev);
603         return adapter->hw.nvm.word_size * 2;
604 }
605
606 static int igb_get_eeprom(struct net_device *netdev,
607                           struct ethtool_eeprom *eeprom, u8 *bytes)
608 {
609         struct igb_adapter *adapter = netdev_priv(netdev);
610         struct e1000_hw *hw = &adapter->hw;
611         u16 *eeprom_buff;
612         int first_word, last_word;
613         int ret_val = 0;
614         u16 i;
615
616         if (eeprom->len == 0)
617                 return -EINVAL;
618
619         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
620
621         first_word = eeprom->offset >> 1;
622         last_word = (eeprom->offset + eeprom->len - 1) >> 1;
623
624         eeprom_buff = kmalloc(sizeof(u16) *
625                         (last_word - first_word + 1), GFP_KERNEL);
626         if (!eeprom_buff)
627                 return -ENOMEM;
628
629         if (hw->nvm.type == e1000_nvm_eeprom_spi)
630                 ret_val = e1000_read_nvm(hw, first_word,
631                                          last_word - first_word + 1,
632                                          eeprom_buff);
633         else {
634                 for (i = 0; i < last_word - first_word + 1; i++) {
635                         ret_val = e1000_read_nvm(hw, first_word + i, 1,
636                                                  &eeprom_buff[i]);
637                         if (ret_val)
638                                 break;
639                 }
640         }
641
642         /* Device's eeprom is always little-endian, word addressable */
643         for (i = 0; i < last_word - first_word + 1; i++)
644                 eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]);
645
646         memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
647                         eeprom->len);
648         kfree(eeprom_buff);
649
650         return ret_val;
651 }
652
653 static int igb_set_eeprom(struct net_device *netdev,
654                           struct ethtool_eeprom *eeprom, u8 *bytes)
655 {
656         struct igb_adapter *adapter = netdev_priv(netdev);
657         struct e1000_hw *hw = &adapter->hw;
658         u16 *eeprom_buff;
659         void *ptr;
660         int max_len, first_word, last_word, ret_val = 0;
661         u16 i;
662
663         if (eeprom->len == 0)
664                 return -EOPNOTSUPP;
665
666         if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
667                 return -EFAULT;
668
669         max_len = hw->nvm.word_size * 2;
670
671         first_word = eeprom->offset >> 1;
672         last_word = (eeprom->offset + eeprom->len - 1) >> 1;
673         eeprom_buff = kmalloc(max_len, GFP_KERNEL);
674         if (!eeprom_buff)
675                 return -ENOMEM;
676
677         ptr = (void *)eeprom_buff;
678
679         if (eeprom->offset & 1) {
680                 /* need read/modify/write of first changed EEPROM word */
681                 /* only the second byte of the word is being modified */
682                 ret_val = e1000_read_nvm(hw, first_word, 1,
683                                             &eeprom_buff[0]);
684                 ptr++;
685         }
686         if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
687                 /* need read/modify/write of last changed EEPROM word */
688                 /* only the first byte of the word is being modified */
689                 ret_val = e1000_read_nvm(hw, last_word, 1,
690                           &eeprom_buff[last_word - first_word]);
691         }
692
693         /* Device's eeprom is always little-endian, word addressable */
694         for (i = 0; i < last_word - first_word + 1; i++)
695                 le16_to_cpus(&eeprom_buff[i]);
696
697         memcpy(ptr, bytes, eeprom->len);
698
699         for (i = 0; i < last_word - first_word + 1; i++)
700                 cpu_to_le16s(&eeprom_buff[i]);
701
702         ret_val = e1000_write_nvm(hw, first_word,
703                                   last_word - first_word + 1, eeprom_buff);
704
705         /* Update the checksum over the first part of the EEPROM if needed
706          * and flush shadow RAM for 82573 controllers */
707         if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
708                 e1000_update_nvm_checksum(hw);
709
710         kfree(eeprom_buff);
711         return ret_val;
712 }
713
714 static void igb_get_drvinfo(struct net_device *netdev,
715                             struct ethtool_drvinfo *drvinfo)
716 {
717         struct igb_adapter *adapter = netdev_priv(netdev);
718
719         strncpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver) - 1);
720         strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1);
721
722         /* EEPROM image version # is reported as firmware version # for
723          * 82575 controllers */
724         snprintf(drvinfo->fw_version, 32, "%d.%d-%d",
725                  (adapter->fw_version & 0xF000) >> 12,
726                  (adapter->fw_version & 0x0FF0) >> 4,
727                  adapter->fw_version & 0x000F);
728
729         strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) -1);
730         drvinfo->n_stats = IGB_STATS_LEN;
731         drvinfo->testinfo_len = IGB_TEST_LEN;
732         drvinfo->regdump_len = igb_get_regs_len(netdev);
733         drvinfo->eedump_len = igb_get_eeprom_len(netdev);
734 }
735
736 static void igb_get_ringparam(struct net_device *netdev,
737                               struct ethtool_ringparam *ring)
738 {
739         struct igb_adapter *adapter = netdev_priv(netdev);
740
741         ring->rx_max_pending = IGB_MAX_RXD;
742         ring->tx_max_pending = IGB_MAX_TXD;
743         ring->rx_mini_max_pending = 0;
744         ring->rx_jumbo_max_pending = 0;
745         ring->rx_pending = adapter->rx_ring_count;
746         ring->tx_pending = adapter->tx_ring_count;
747         ring->rx_mini_pending = 0;
748         ring->rx_jumbo_pending = 0;
749 }
750
/* igb_set_ringparam - resize the Tx/Rx descriptor rings.
 *
 * If the interface is down, only the counts are recorded (resources are
 * allocated on the next up).  If it is running, new resources are built
 * in a temporary ring array BEFORE the old ones are freed, so a failed
 * allocation leaves the device operating on its original rings.
 */
static int igb_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *temp_ring;
	int i, err = 0;
	u16 new_rx_count, new_tx_count;

	/* mini/jumbo rings are not supported (see igb_get_ringparam) */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* clamp requests to hardware limits and round up to the
	 * required descriptor multiple */
	new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
	new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
	new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	/* serialize against any concurrent reset */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		/* interface down: just record the new sizes */
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* one scratch array sized for whichever direction has more
	 * queues; reused for Tx first, then Rx, below */
	if (adapter->num_tx_queues > adapter->num_rx_queues)
		temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
	else
		temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	igb_down(adapter);

	/*
	 * We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the tx and rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct igb_ring));

			temp_ring[i].count = new_tx_count;
			err = igb_setup_tx_resources(&temp_ring[i]);
			if (err) {
				/* unwind the temp rings set up so far */
				while (i) {
					i--;
					igb_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		/* all new rings allocated; now swap them in */
		for (i = 0; i < adapter->num_tx_queues; i++) {
			igb_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct igb_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct igb_ring));

			temp_ring[i].count = new_rx_count;
			err = igb_setup_rx_resources(&temp_ring[i]);
			if (err) {
				/* unwind the temp rings set up so far */
				while (i) {
					i--;
					igb_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}

		}

		/* all new rings allocated; now swap them in */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			igb_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct igb_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}
err_setup:
	igb_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IGB_RESETTING, &adapter->state);
	return err;
}
865 static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
866                              int reg, u32 mask, u32 write)
867 {
868         struct e1000_hw *hw = &adapter->hw;
869         u32 pat, val;
870         static const u32 _test[] =
871                 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
872         for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
873                 E1000_WRITE_REG(hw, reg, (_test[pat] & write));
874                 val = E1000_READ_REG(hw, reg) & mask;
875                 if (val != (_test[pat] & write & mask)) {
876                         dev_err(pci_dev_to_dev(adapter->pdev), "pattern test reg %04X "
877                                 "failed: got 0x%08X expected 0x%08X\n",
878                                 E1000_REGISTER(hw, reg), val, (_test[pat] & write & mask));
879                         *data = E1000_REGISTER(hw, reg);
880                         return 1;
881                 }
882         }
883
884         return 0;
885 }
886
887 static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
888                               int reg, u32 mask, u32 write)
889 {
890         struct e1000_hw *hw = &adapter->hw;
891         u32 val;
892         E1000_WRITE_REG(hw, reg, write & mask);
893         val = E1000_READ_REG(hw, reg);
894         if ((write & mask) != (val & mask)) {
895                 dev_err(pci_dev_to_dev(adapter->pdev), "set/check reg %04X test failed:"
896                         " got 0x%08X expected 0x%08X\n", reg,
897                         (val & mask), (write & mask));
898                 *data = E1000_REGISTER(hw, reg);
899                 return 1;
900         }
901
902         return 0;
903 }
904
/* Helpers for igb_reg_test(): run one register check and, on failure,
 * return 1 from the CALLING function with *data already set by the
 * helper ("adapter" and "data" must be in scope at the call site). */
#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)
916
/* igb_reg_test - ethtool register self-test.
 *
 * Checks the STATUS register's toggleable bits, then walks the per-MAC
 * test table (from igb_regtest.h) until the NULL terminator entry,
 * applying the test type recorded in each row.  Returns 0 and *data = 0
 * on success; on failure returns 1 with *data holding the offending
 * register offset (or 1 for a STATUS failure).
 */
static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	/* pick the test table and STATUS toggle mask for this MAC */
	switch (adapter->hw.mac.type) {
	case e1000_i350:
		test = reg_test_i350;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_82580:
		test = reg_test_82580;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_82576:
		test = reg_test_82576;
		toggle = 0x7FFFF3FF;
		break;
	default:
		test = reg_test_82575;
		toggle = 0x7FFFF3FF;
		break;
	}

	/* Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writable on newer MACs.
	 */
	before = E1000_READ_REG(hw, E1000_STATUS);
	value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
	E1000_WRITE_REG(hw, E1000_STATUS, toggle);
	after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
	if (value != after) {
		dev_err(pci_dev_to_dev(adapter->pdev), "failed STATUS register test "
			"got: 0x%08X expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	E1000_WRITE_REG(hw, E1000_STATUS, before);

	/* Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg +
						(i * test->reg_offset),
						test->mask,
						test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg +
						(i * test->reg_offset),
						test->mask,
						test->write);
				break;
			case WRITE_NO_TEST:
				/* write-only register: just poke it */
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
					+ (i * test->reg_offset));
				break;
			case TABLE32_TEST:
				/* array of 32-bit entries, 4-byte stride */
				REG_PATTERN_TEST(test->reg + (i * 4),
						test->mask,
						test->write);
				break;
			case TABLE64_TEST_LO:
				/* low dword of 64-bit entries, 8-byte stride */
				REG_PATTERN_TEST(test->reg + (i * 8),
						test->mask,
						test->write);
				break;
			case TABLE64_TEST_HI:
				/* high dword of 64-bit entries, 8-byte stride */
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						test->mask,
						test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}
1007
1008 static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1009 {
1010         u16 temp;
1011         u16 checksum = 0;
1012         u16 i;
1013
1014         *data = 0;
1015         /* Read and add up the contents of the EEPROM */
1016         for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1017                 if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
1018                         *data = 1;
1019                         break;
1020                 }
1021                 checksum += temp;
1022         }
1023
1024         /* If Checksum is not Correct return error else test passed */
1025         if ((checksum != (u16) NVM_SUM) && !(*data))
1026                 *data = 2;
1027
1028         return *data;
1029 }
1030
1031 static irqreturn_t igb_test_intr(int irq, void *data)
1032 {
1033         struct igb_adapter *adapter = (struct igb_adapter *) data;
1034         struct e1000_hw *hw = &adapter->hw;
1035
1036         adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
1037
1038         return IRQ_HANDLED;
1039 }
1040
/* igb_intr_test - ethtool interrupt self-test.
 *
 * Temporarily installs igb_test_intr() (MSI-X vector 0, MSI, or a
 * possibly-shared legacy IRQ), then for each writable ICS bit forces
 * the interrupt and checks that it is (or is not) delivered depending
 * on whether it was masked.  *data encodes the failure mode:
 * 0 = pass, 1 = request_irq failed, 3 = masked interrupt fired,
 * 4 = unmasked interrupt did not fire, 5 = other interrupts leaked.
 * Returns *data.
 */
static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 mask, ics_mask, i = 0, shared_int = TRUE;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		if (request_irq(adapter->msix_entries[0].vector,
				&igb_test_intr, 0, netdev->name, adapter)) {
			*data = 1;
			return -1;
		}
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		shared_int = FALSE;
		if (request_irq(irq,
				igb_test_intr, 0, netdev->name, adapter)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
				netdev->name, adapter)) {
		/* probe succeeded: line is exclusively ours */
		shared_int = FALSE;
	} else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
		 netdev->name, adapter)) {
		*data = 1;
		return -1;
	}
	dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n",
		 (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Define all writable bits for ICS */
	switch (hw->mac.type) {
	case e1000_82575:
		ics_mask = 0x37F47EDD;
		break;
	case e1000_82576:
		ics_mask = 0x77D4FBFD;
		break;
	case e1000_82580:
		ics_mask = 0x77DCFED5;
		break;
	case e1000_i350:
		ics_mask = 0x77DCFED5;
		break;
	default:
		ics_mask = 0x7FFFFFFF;
		break;
	}

	/* Test each interrupt */
	for (; i < 31; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!(mask & ics_mask))
			continue;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			E1000_WRITE_REG(hw, E1000_ICR, ~0);

			E1000_WRITE_REG(hw, E1000_IMC, mask);
			E1000_WRITE_REG(hw, E1000_ICS, mask);
			E1000_WRITE_FLUSH(hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;

		/* Flush any pending interrupts */
		E1000_WRITE_REG(hw, E1000_ICR, ~0);

		E1000_WRITE_REG(hw, E1000_IMS, mask);
		E1000_WRITE_REG(hw, E1000_ICS, mask);
		E1000_WRITE_FLUSH(hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			E1000_WRITE_REG(hw, E1000_ICR, ~0);

			E1000_WRITE_REG(hw, E1000_IMC, ~mask);
			E1000_WRITE_REG(hw, E1000_ICS, ~mask);
			E1000_WRITE_FLUSH(hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	if (adapter->msix_entries)
		free_irq(adapter->msix_entries[0].vector, adapter);
	else
		free_irq(irq, adapter);

	return *data;
}
1188
/* igb_free_desc_rings - release the dedicated Tx/Rx rings used by the
 * loopback self-test (allocated in igb_setup_desc_rings()). */
static void igb_free_desc_rings(struct igb_adapter *adapter)
{
	igb_free_tx_resources(&adapter->test_tx_ring);
	igb_free_rx_resources(&adapter->test_rx_ring);
}
1194
/* igb_setup_desc_rings - allocate and configure the test Tx/Rx rings
 * for the loopback self-test.
 *
 * Returns 0 on success, 1 if Tx resource setup failed, 2 if Rx setup
 * failed; on failure everything allocated so far is released.
 */
static int igb_setup_desc_rings(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IGB_DEFAULT_TXD;
	tx_ring->dev = pci_dev_to_dev(adapter->pdev);
	tx_ring->netdev = adapter->netdev;
	/* use the first queue index past those owned by VFs */
	tx_ring->reg_idx = adapter->vfs_allocated_count;

	if (igb_setup_tx_resources(tx_ring)) {
		ret_val = 1;
		goto err_nomem;
	}

	igb_setup_tctl(adapter);
	igb_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx descriptor ring and Rx buffers */
	rx_ring->count = IGB_DEFAULT_RXD;
	rx_ring->dev = pci_dev_to_dev(adapter->pdev);
	rx_ring->netdev = adapter->netdev;
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
	rx_ring->rx_buffer_len = IGB_RXBUFFER_512;
#endif
	rx_ring->reg_idx = adapter->vfs_allocated_count;

	if (igb_setup_rx_resources(rx_ring)) {
		ret_val = 2;
		goto err_nomem;
	}

	/* set the default queue to queue 0 of PF */
	E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3);

	/* enable receive ring */
	igb_setup_rctl(adapter);
	igb_configure_rx_ring(adapter, rx_ring);

	igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));

	return 0;

err_nomem:
	igb_free_desc_rings(adapter);
	return ret_val;
}
1245
/* igb_phy_disable_receiver - disable the PHY receiver during loopback
 * so a plugged-in cable cannot restart autonegotiation.
 *
 * NOTE(review): registers 29/30 and these values are an undocumented
 * M88 PHY sequence; the write order matters — do not reorder.
 */
static void igb_phy_disable_receiver(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1000_write_phy_reg(hw, 29, 0x001F);
	e1000_write_phy_reg(hw, 30, 0x8FFC);
	e1000_write_phy_reg(hw, 29, 0x001A);
	e1000_write_phy_reg(hw, 30, 0x8FF0);
}
1256
1257 static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1258 {
1259         struct e1000_hw *hw = &adapter->hw;
1260         u32 ctrl_reg = 0;
1261
1262         hw->mac.autoneg = FALSE;
1263
1264         if (hw->phy.type == e1000_phy_m88) {
1265                         /* Auto-MDI/MDIX Off */
1266                         e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1267                         /* reset to update Auto-MDI/MDIX */
1268                         e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
1269                         /* autoneg off */
1270                         e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
1271         } else {
1272                 /* enable MII loopback */
1273                 if (hw->phy.type == e1000_phy_82580) 
1274                         e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
1275         }
1276
1277         /* force 1000, set loopback  */
1278         e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1279
1280         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1281         /* Now set up the MAC to the same speed/duplex as the PHY. */
1282         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1283         ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1284         ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1285                      E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1286                      E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1287                      E1000_CTRL_FD |     /* Force Duplex to FULL */
1288                      E1000_CTRL_SLU);    /* Set link up enable bit */
1289
1290         if (hw->phy.type == e1000_phy_m88)
1291                 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1292
1293         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1294
1295         /* Disable the receiver on the PHY so when a cable is plugged in, the
1296          * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1297          */
1298         if (hw->phy.type == e1000_phy_m88)
1299                 igb_phy_disable_receiver(adapter);
1300         
1301         udelay(500);
1302         return 0;
1303 }
1304
/* igb_set_phy_loopback - single entry point for PHY loopback setup;
 * currently always uses the integrated PHY path. */
static int igb_set_phy_loopback(struct igb_adapter *adapter)
{
	return igb_integrated_phy_loopback(adapter);
}
1309
/* igb_setup_loopback_test - configure loopback for the self-test.
 *
 * SerDes/SGMII links (detected via CTRL_EXT) are looped back at the
 * MAC/SerDes level — with an extra MPHY tweak on DH89xxCC parts —
 * while copper links fall through to PHY loopback.  Returns 0 for the
 * SerDes path, otherwise the result of igb_set_phy_loopback().
 */
static int igb_setup_loopback_test(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);

	/* use CTRL_EXT to identify link type as SGMII can appear as copper */
	if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
		if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
		    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
		    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
		    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {

			/* Enable DH89xxCC MPHY for near end loopback */
			reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
			reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
				E1000_MPHY_PCS_CLK_REG_OFFSET;
			E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);

			reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
			reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
			E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
		}

		/* loop frames back at the transceiver */
		reg = E1000_READ_REG(hw, E1000_RCTL);
		reg |= E1000_RCTL_LBM_TCVR;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);

		E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);

		/* disable flow control, take the link out of reset and
		 * force it up at full duplex */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		reg &= ~(E1000_CTRL_RFCE |
			 E1000_CTRL_TFCE |
			 E1000_CTRL_LRST);
		reg |= E1000_CTRL_SLU |
		       E1000_CTRL_FD;
		E1000_WRITE_REG(hw, E1000_CTRL, reg);

		/* Unset switch control to serdes energy detect */
		reg = E1000_READ_REG(hw, E1000_CONNSW);
		reg &= ~E1000_CONNSW_ENRGSRC;
		E1000_WRITE_REG(hw, E1000_CONNSW, reg);

		/* Set PCS register for forced speed */
		reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
		reg &= ~E1000_PCS_LCTL_AN_ENABLE;     /* Disable Autoneg*/
		reg |= E1000_PCS_LCTL_FLV_LINK_UP |   /* Force link up */
		       E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
		       E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
		       E1000_PCS_LCTL_FSD |           /* Force Speed */
		       E1000_PCS_LCTL_FORCE_LINK;     /* Force Link */
		E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);

		return 0;
	}

	return igb_set_phy_loopback(adapter);
}
1369
/* igb_loopback_cleanup - undo igb_setup_loopback_test(): clear the
 * DH89xxCC MPHY loopback bit where it was set, drop the RCTL loopback
 * modes, re-enable autoneg and take the PHY out of loopback.
 */
static void igb_loopback_cleanup(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
	    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
		u32 reg;

		/* Disable near end loopback on DH89xxCC */
		reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
		reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK ) |
			E1000_MPHY_PCS_CLK_REG_OFFSET;
		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);

		reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
		reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
		E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
	}

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	hw->mac.autoneg = TRUE;
	e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
	if (phy_reg & MII_CR_LOOPBACK) {
		/* clear the loopback bit and restart the PHY */
		phy_reg &= ~MII_CR_LOOPBACK;
		e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
		e1000_phy_commit(hw);
	}
}
1405 static void igb_create_lbtest_frame(struct sk_buff *skb,
1406                                     unsigned int frame_size)
1407 {
1408         memset(skb->data, 0xFF, frame_size);
1409         frame_size /= 2;
1410         memset(&skb->data[frame_size], 0xAA, frame_size - 1);
1411         memset(&skb->data[frame_size + 10], 0xBE, 1);
1412         memset(&skb->data[frame_size + 12], 0xAF, 1);
1413 }
1414
1415 static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1416 {
1417         frame_size /= 2;
1418         if (*(skb->data + 3) == 0xFF) {
1419                 if ((*(skb->data + frame_size + 10) == 0xBE) &&
1420                    (*(skb->data + frame_size + 12) == 0xAF)) {
1421                         return 0;
1422                 }
1423         }
1424         return 13;
1425 }
1426
/* igb_clean_test_rings - reap completed loopback frames.
 *
 * Walks received descriptors with the DD bit set, unmaps each Rx buffer,
 * counts frames whose contents match the test pattern, and frees the
 * corresponding Tx buffer (Rx and Tx complete in lockstep during the
 * loopback test).  Buffers are re-posted and both rings' next_to_clean
 * indices updated.  Returns the number of pattern-matching frames.
 */
static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
				struct igb_ring *tx_ring,
				unsigned int size)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *rx_buffer_info;
	struct igb_tx_buffer *tx_buffer_info;
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
	const int bufsz = rx_ring->rx_buffer_len;
#else
	const int bufsz = IGB_RX_HDR_LEN;
#endif
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		/* check rx buffer */
		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];

		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
		dma_unmap_single(rx_ring->dev,
				 rx_buffer_info->dma,
				 bufsz,
				 DMA_FROM_DEVICE);
		rx_buffer_info->dma = 0;

		/* verify contents of skb */
		if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
			count++;

		/* unmap buffer on tx side */
		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);

		/* increment rx/tx next to clean counters */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
	}

	/* re-map buffers to ring, store next to clean values */
	igb_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
1484
/* igb_run_loopback_test - transmit pattern frames through the configured
 * loopback path and verify they all come back intact.
 *
 * One test skb is allocated and its refcount bumped (skb_get) for every
 * transmit, so the single kfree_skb at the end drops only the original
 * reference.  Returns 0 on success; 11 = skb alloc failure, 12 = not all
 * 64 frames were queued, 13 = not all 64 frames were received intact.
 */
static int igb_run_loopback_test(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	u16 i, j, lc, good_cnt;
	int ret_val = 0;
	unsigned int size = IGB_RXBUFFER_512;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	igb_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) { /* loop count loop */
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			/* extra reference: the tx path consumes one */
			skb_get(skb);
			tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from tx to rx */
		msleep(200);

		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	} /* end loop count loop */

	/* free the original skb */
	kfree_skb(skb);

	return ret_val;
}
1547
1548 static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1549 {
1550         /* PHY loopback cannot be performed if SoL/IDER
1551          * sessions are active */
1552         if (e1000_check_reset_block(&adapter->hw)) {
1553                 dev_err(pci_dev_to_dev(adapter->pdev),
1554                         "Cannot do PHY loopback test "
1555                         "when SoL/IDER is active.\n");
1556                 *data = 0;
1557                 goto out;
1558         }
1559         *data = igb_setup_desc_rings(adapter);
1560         if (*data)
1561                 goto out;
1562         *data = igb_setup_loopback_test(adapter);
1563         if (*data)
1564                 goto err_loopback;
1565         *data = igb_run_loopback_test(adapter);
1566
1567         igb_loopback_cleanup(adapter);
1568
1569 err_loopback:
1570         igb_free_desc_rings(adapter);
1571 out:
1572         return *data;
1573 }
1574
1575 static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1576 {
1577         u32 link;
1578         int i, time;
1579
1580         *data = 0;
1581         time = 0;
1582         if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
1583                 int i = 0;
1584                 adapter->hw.mac.serdes_has_link = FALSE;
1585
1586                 /* On some blade server designs, link establishment
1587                  * could take as long as 2-3 minutes */
1588                 do {
1589                         e1000_check_for_link(&adapter->hw);
1590                         if (adapter->hw.mac.serdes_has_link)
1591                                 goto out;
1592                         msleep(20);
1593                 } while (i++ < 3750);
1594
1595                 *data = 1;
1596         } else {
1597                 for (i=0; i < IGB_MAX_LINK_TRIES; i++) {
1598                 link = igb_has_link(adapter);
1599                         if (link)
1600                                 goto out;
1601                         else {
1602                                 time++;
1603                                 msleep(1000);
1604                         }
1605                 }
1606                 if (!link)
1607                         *data = 1;
1608         }
1609         out:
1610                 return *data;
1611 }
1612
1613 static void igb_diag_test(struct net_device *netdev,
1614                           struct ethtool_test *eth_test, u64 *data)
1615 {
1616         struct igb_adapter *adapter = netdev_priv(netdev);
1617         u16 autoneg_advertised;
1618         u8 forced_speed_duplex, autoneg;
1619         bool if_running = netif_running(netdev);
1620
1621         set_bit(__IGB_TESTING, &adapter->state);
1622         if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1623                 /* Offline tests */
1624
1625                 /* save speed, duplex, autoneg settings */
1626                 autoneg_advertised = adapter->hw.phy.autoneg_advertised;
1627                 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1628                 autoneg = adapter->hw.mac.autoneg;
1629
1630                 dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
1631
1632                 /* power up link for link test */
1633                         igb_power_up_link(adapter);
1634                 
1635                 /* Link test performed before hardware reset so autoneg doesn't
1636                  * interfere with test result */
1637                 if (igb_link_test(adapter, &data[4]))
1638                         eth_test->flags |= ETH_TEST_FL_FAILED;
1639
1640                 if (if_running)
1641                         /* indicate we're in test mode */
1642                         dev_close(netdev);
1643                 else
1644                         igb_reset(adapter);
1645
1646                 if (igb_reg_test(adapter, &data[0]))
1647                         eth_test->flags |= ETH_TEST_FL_FAILED;
1648
1649                 igb_reset(adapter);
1650                 if (igb_eeprom_test(adapter, &data[1]))
1651                         eth_test->flags |= ETH_TEST_FL_FAILED;
1652
1653                 igb_reset(adapter);
1654                 if (igb_intr_test(adapter, &data[2]))
1655                         eth_test->flags |= ETH_TEST_FL_FAILED;
1656
1657                 igb_reset(adapter);
1658                 /* power up link for loopback test */
1659                         igb_power_up_link(adapter);
1660
1661                 if (igb_loopback_test(adapter, &data[3]))
1662                         eth_test->flags |= ETH_TEST_FL_FAILED;
1663
1664                 /* restore speed, duplex, autoneg settings */
1665                 adapter->hw.phy.autoneg_advertised = autoneg_advertised;
1666                 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
1667                 adapter->hw.mac.autoneg = autoneg;
1668
1669                 /* force this routine to wait until autoneg complete/timeout */
1670                 adapter->hw.phy.autoneg_wait_to_complete = TRUE;
1671                 igb_reset(adapter);
1672                 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
1673
1674                 clear_bit(__IGB_TESTING, &adapter->state);
1675                 if (if_running)
1676                         dev_open(netdev);
1677         } else {
1678                 dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n");
1679
1680                 /* PHY is powered down when interface is down */
1681                 if (if_running && igb_link_test(adapter, &data[4]))
1682                         eth_test->flags |= ETH_TEST_FL_FAILED;
1683                 else
1684                         data[4] = 0;
1685
1686                 /* Online tests aren't run; pass by default */
1687                 data[0] = 0;
1688                 data[1] = 0;
1689                 data[2] = 0;
1690                 data[3] = 0;
1691
1692                 clear_bit(__IGB_TESTING, &adapter->state);
1693         }
1694         msleep_interruptible(4 * 1000);
1695 }
1696
1697 static int igb_wol_exclusion(struct igb_adapter *adapter,
1698                              struct ethtool_wolinfo *wol)
1699 {
1700         struct e1000_hw *hw = &adapter->hw;
1701         int retval = 1; /* fail by default */
1702
1703         switch (hw->device_id) {
1704         case E1000_DEV_ID_82575GB_QUAD_COPPER:
1705                 /* WoL not supported */
1706                 wol->supported = 0;
1707                 break;
1708         case E1000_DEV_ID_82575EB_FIBER_SERDES:
1709         case E1000_DEV_ID_82576_FIBER:
1710         case E1000_DEV_ID_82576_SERDES:
1711                 /* Wake events not supported on port B */
1712                 if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
1713                         wol->supported = 0;
1714                         break;
1715                 }
1716                 /* return success for non excluded adapter ports */
1717                 retval = 0;
1718                 break;
1719         case E1000_DEV_ID_82576_QUAD_COPPER:
1720         case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
1721                 /* quad port adapters only support WoL on port A */
1722                 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
1723                         wol->supported = 0;
1724                         break;
1725                 }
1726                 /* return success for non excluded adapter ports */
1727                 retval = 0;
1728                 break;
1729         default:
1730                 /* dual port cards only support WoL on port A from now on
1731                  * unless it was enabled in the eeprom for port B
1732                  * so exclude FUNC_1 ports from having WoL enabled */
1733                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
1734                     !adapter->eeprom_wol) {
1735                         wol->supported = 0;
1736                         break;
1737                 }
1738
1739                 retval = 0;
1740         }
1741
1742         return retval;
1743 }
1744
1745 static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1746 {
1747         struct igb_adapter *adapter = netdev_priv(netdev);
1748
1749         wol->supported = WAKE_UCAST | WAKE_MCAST |
1750                          WAKE_BCAST | WAKE_MAGIC |
1751                          WAKE_PHY;
1752         wol->wolopts = 0;
1753
1754         /* this function will set ->supported = 0 and return 1 if wol is not
1755          * supported by this hardware */
1756         if (igb_wol_exclusion(adapter, wol) ||
1757             !device_can_wakeup(&adapter->pdev->dev))
1758                 return;
1759
1760         /* apply any specific unsupported masks here */
1761         switch (adapter->hw.device_id) {
1762         default:
1763                 break;
1764         }
1765
1766         if (adapter->wol & E1000_WUFC_EX)
1767                 wol->wolopts |= WAKE_UCAST;
1768         if (adapter->wol & E1000_WUFC_MC)
1769                 wol->wolopts |= WAKE_MCAST;
1770         if (adapter->wol & E1000_WUFC_BC)
1771                 wol->wolopts |= WAKE_BCAST;
1772         if (adapter->wol & E1000_WUFC_MAG)
1773                 wol->wolopts |= WAKE_MAGIC;
1774         if (adapter->wol & E1000_WUFC_LNKC)
1775                 wol->wolopts |= WAKE_PHY;
1776 }
1777
1778 static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1779 {
1780         struct igb_adapter *adapter = netdev_priv(netdev);
1781
1782         if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
1783                 return -EOPNOTSUPP;
1784
1785         if (igb_wol_exclusion(adapter, wol) ||
1786             !device_can_wakeup(&adapter->pdev->dev))
1787                 return wol->wolopts ? -EOPNOTSUPP : 0;
1788         /* these settings will always override what we currently have */
1789         adapter->wol = 0;
1790
1791         if (wol->wolopts & WAKE_UCAST)
1792                 adapter->wol |= E1000_WUFC_EX;
1793         if (wol->wolopts & WAKE_MCAST)
1794                 adapter->wol |= E1000_WUFC_MC;
1795         if (wol->wolopts & WAKE_BCAST)
1796                 adapter->wol |= E1000_WUFC_BC;
1797         if (wol->wolopts & WAKE_MAGIC)
1798                 adapter->wol |= E1000_WUFC_MAG;
1799         if (wol->wolopts & WAKE_PHY)
1800                 adapter->wol |= E1000_WUFC_LNKC;
1801         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1802
1803         return 0;
1804 }
1805
1806 /* bit defines for adapter->led_status */
1807 #ifdef HAVE_ETHTOOL_SET_PHYS_ID
1808 static int igb_set_phys_id(struct net_device *netdev,
1809                            enum ethtool_phys_id_state state)
1810 {
1811         struct igb_adapter *adapter = netdev_priv(netdev);
1812         struct e1000_hw *hw = &adapter->hw;
1813
1814         switch (state) {
1815         case ETHTOOL_ID_ACTIVE:
1816                 e1000_blink_led(hw);
1817                 return 2;
1818         case ETHTOOL_ID_ON:
1819                 e1000_led_on(hw);
1820                 break;
1821         case ETHTOOL_ID_OFF:
1822                 e1000_led_off(hw);
1823                 break;
1824         case ETHTOOL_ID_INACTIVE:
1825                 e1000_led_off(hw);
1826                 e1000_cleanup_led(hw);
1827                 break;
1828         }
1829
1830         return 0;
1831 }
1832 #else
1833 static int igb_phys_id(struct net_device *netdev, u32 data)
1834 {
1835         struct igb_adapter *adapter = netdev_priv(netdev);
1836         struct e1000_hw *hw = &adapter->hw;
1837         unsigned long timeout;
1838
1839         timeout = data * 1000;
1840
1841         /*
1842          *  msleep_interruptable only accepts unsigned int so we are limited
1843          * in how long a duration we can wait
1844          */
1845         if (!timeout || timeout > UINT_MAX)
1846                 timeout = UINT_MAX;
1847
1848         e1000_blink_led(hw);
1849         msleep_interruptible(timeout);
1850
1851         e1000_led_off(hw);
1852         e1000_cleanup_led(hw);
1853
1854         return 0;
1855 }
1856 #endif /* HAVE_ETHTOOL_SET_PHYS_ID */
1857
1858 static int igb_set_coalesce(struct net_device *netdev,
1859                             struct ethtool_coalesce *ec)
1860 {
1861         struct igb_adapter *adapter = netdev_priv(netdev);
1862         int i;
1863
1864         if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1865             ((ec->rx_coalesce_usecs > 3) &&
1866              (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1867             (ec->rx_coalesce_usecs == 2))
1868             {
1869                 printk("set_coalesce:invalid parameter..");
1870                 return -EINVAL;
1871         }
1872
1873         if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1874             ((ec->tx_coalesce_usecs > 3) &&
1875              (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1876             (ec->tx_coalesce_usecs == 2))
1877                 return -EINVAL;
1878
1879         if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
1880                 return -EINVAL;
1881
1882         if (ec->tx_max_coalesced_frames_irq)
1883                 adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
1884
1885         /* If ITR is disabled, disable DMAC */
1886         if (ec->rx_coalesce_usecs == 0) {
1887                 adapter->dmac = IGB_DMAC_DISABLE;
1888         }
1889         
1890         /* convert to rate of irq's per second */
1891         if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
1892                 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
1893         else
1894                 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
1895
1896         /* convert to rate of irq's per second */
1897         if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
1898                 adapter->tx_itr_setting = adapter->rx_itr_setting;
1899         else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
1900                 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
1901         else
1902                 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
1903
1904         for (i = 0; i < adapter->num_q_vectors; i++) {
1905                 struct igb_q_vector *q_vector = adapter->q_vector[i];
1906                 q_vector->tx.work_limit = adapter->tx_work_limit;
1907                 if (q_vector->rx.ring)
1908                         q_vector->itr_val = adapter->rx_itr_setting;
1909                 else
1910                         q_vector->itr_val = adapter->tx_itr_setting;
1911                 if (q_vector->itr_val && q_vector->itr_val <= 3)
1912                         q_vector->itr_val = IGB_START_ITR;
1913                 q_vector->set_itr = 1;
1914         }
1915
1916         return 0;
1917 }
1918
1919 static int igb_get_coalesce(struct net_device *netdev,
1920                             struct ethtool_coalesce *ec)
1921 {
1922         struct igb_adapter *adapter = netdev_priv(netdev);
1923
1924         if (adapter->rx_itr_setting <= 3)
1925                 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
1926         else
1927                 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
1928
1929         ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
1930
1931         if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
1932                 if (adapter->tx_itr_setting <= 3)
1933                         ec->tx_coalesce_usecs = adapter->tx_itr_setting;
1934                 else
1935                         ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
1936         }
1937
1938         return 0;
1939 }
1940
/* igb_nway_reset - ethtool nway_reset handler
 *
 * Restarts the interface when it is running, which re-triggers link
 * (re)negotiation; a stopped interface is left untouched.
 */
static int igb_nway_reset(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		igb_reinit_locked(adapter);

	return 0;
}
1948
1949 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
1950 static int igb_get_sset_count(struct net_device *netdev, int sset)
1951 {
1952         switch (sset) {
1953         case ETH_SS_STATS:
1954                 return IGB_STATS_LEN;
1955         case ETH_SS_TEST:
1956                 return IGB_TEST_LEN;
1957         default:
1958                 return -ENOTSUPP;
1959         }
1960 }
1961 #else
/* igb_get_stats_count - legacy (pre get_sset_count) stats string count */
static int igb_get_stats_count(struct net_device *netdev)
{
	return IGB_STATS_LEN;
}
1966
/* igb_diag_test_count - legacy (pre get_sset_count) self-test count */
static int igb_diag_test_count(struct net_device *netdev)
{
	return IGB_TEST_LEN;
}
1971 #endif
1972
1973 static void igb_get_ethtool_stats(struct net_device *netdev,
1974                                   struct ethtool_stats *stats, u64 *data)
1975 {
1976         struct igb_adapter *adapter = netdev_priv(netdev);
1977 #ifdef HAVE_NETDEV_STATS_IN_NETDEV
1978         struct net_device_stats *net_stats = &netdev->stats;
1979 #else
1980         struct net_device_stats *net_stats = &adapter->net_stats;
1981 #endif
1982         u64 *queue_stat;
1983         int i, j, k;
1984         char *p;
1985
1986         igb_update_stats(adapter);
1987
1988         for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1989                 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
1990                 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
1991                         sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1992         }
1993         for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
1994                 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
1995                 data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
1996                         sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1997         }
1998         for (j = 0; j < adapter->num_tx_queues; j++) {
1999                 queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
2000                 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
2001                         data[i] = queue_stat[k];
2002         }
2003         for (j = 0; j < adapter->num_rx_queues; j++) {
2004                 queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
2005                 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
2006                         data[i] = queue_stat[k];
2007         }
2008 }
2009
2010 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2011 {
2012         struct igb_adapter *adapter = netdev_priv(netdev);
2013         u8 *p = data;
2014         int i;
2015
2016         switch (stringset) {
2017         case ETH_SS_TEST:
2018                 memcpy(data, *igb_gstrings_test,
2019                         IGB_TEST_LEN*ETH_GSTRING_LEN);
2020                 break;
2021         case ETH_SS_STATS:
2022                 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
2023                         memcpy(p, igb_gstrings_stats[i].stat_string,
2024                                ETH_GSTRING_LEN);
2025                         p += ETH_GSTRING_LEN;
2026                 }
2027                 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
2028                         memcpy(p, igb_gstrings_net_stats[i].stat_string,
2029                                ETH_GSTRING_LEN);
2030                         p += ETH_GSTRING_LEN;
2031                 }
2032                 for (i = 0; i < adapter->num_tx_queues; i++) {
2033                         sprintf(p, "tx_queue_%u_packets", i);
2034                         p += ETH_GSTRING_LEN;
2035                         sprintf(p, "tx_queue_%u_bytes", i);
2036                         p += ETH_GSTRING_LEN;
2037                         sprintf(p, "tx_queue_%u_restart", i);
2038                         p += ETH_GSTRING_LEN;
2039                 }
2040                 for (i = 0; i < adapter->num_rx_queues; i++) {
2041                         sprintf(p, "rx_queue_%u_packets", i);
2042                         p += ETH_GSTRING_LEN;
2043                         sprintf(p, "rx_queue_%u_bytes", i);
2044                         p += ETH_GSTRING_LEN;
2045                         sprintf(p, "rx_queue_%u_drops", i);
2046                         p += ETH_GSTRING_LEN;
2047                         sprintf(p, "rx_queue_%u_csum_err", i);
2048                         p += ETH_GSTRING_LEN;
2049                         sprintf(p, "rx_queue_%u_alloc_failed", i);
2050                         p += ETH_GSTRING_LEN;
2051                 }
2052 /*              BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2053                 break;
2054         }
2055 }
2056
2057 #ifdef CONFIG_PM_RUNTIME
/* igb_ethtool_begin - take a runtime-PM reference before any ethtool op
 * so the device stays powered for the duration of the operation */
static int igb_ethtool_begin(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_sync(&adapter->pdev->dev);

	return 0;
}
2066
/* igb_ethtool_complete - drop the runtime-PM reference taken in
 * igb_ethtool_begin() once the ethtool operation has finished */
static void igb_ethtool_complete(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	pm_runtime_put(&adapter->pdev->dev);
}
2073 #endif /* CONFIG_PM_RUNTIME */
2074
2075 #ifndef HAVE_NDO_SET_FEATURES
/* igb_get_rx_csum - report rx checksum offload state
 * Reads the flag from ring 0 only; igb_set_rx_csum() applies the same
 * setting to every rss ring, so ring 0 is representative. */
static u32 igb_get_rx_csum(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
}
2081
2082 static int igb_set_rx_csum(struct net_device *netdev, u32 data)
2083 {
2084         struct igb_adapter *adapter = netdev_priv(netdev);
2085         int i;
2086
2087         for (i = 0; i < adapter->rss_queues; i++) {
2088                 struct igb_ring *ring = adapter->rx_ring[i];
2089                 if (data)
2090                         set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
2091                 else
2092                         clear_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
2093         }
2094
2095         return 0;
2096 }
2097
/* igb_get_tx_csum - report tx checksum offload state via NETIF_F_IP_CSUM */
static u32 igb_get_tx_csum(struct net_device *netdev)
{
	return (netdev->features & NETIF_F_IP_CSUM) != 0;
}
2102
/* igb_set_tx_csum - enable/disable tx checksum offload
 *
 * Toggles the IP (and, when the kernel supports it, IPv6) checksum
 * feature flags.  SCTP checksum offload is only offered on 82576 and
 * later MACs.  Note the unusual structure: the `} else {` of the outer
 * if/else appears once in each preprocessor arm so that both the
 * NETIF_F_IPV6_CSUM and legacy variants form a complete if/else when
 * compiled — do not "fix" the bracing without accounting for both arms.
 */
static int igb_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (data) {
#ifdef NETIF_F_IPV6_CSUM
		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		if (adapter->hw.mac.type >= e1000_82576)
			netdev->features |= NETIF_F_SCTP_CSUM;
	} else {
		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				      NETIF_F_SCTP_CSUM);
#else
		netdev->features |= NETIF_F_IP_CSUM;
		/* NOTE(review): this arm requires == e1000_82576 while the
		 * IPv6 arm uses >= — confirm whether the difference is
		 * intentional */
		if (adapter->hw.mac.type == e1000_82576)
			netdev->features |= NETIF_F_SCTP_CSUM;
	} else {
		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM);
#endif
	}

	return 0;
}
2126
2127 #ifdef NETIF_F_TSO
/* igb_set_tso - enable/disable TCP segmentation offload
 *
 * Toggles NETIF_F_TSO (and NETIF_F_TSO6 when available) on the netdev.
 * On kernels without per-vlan feature inheritance
 * (!HAVE_NETDEV_VLAN_FEATURES), disabling TSO must also walk every vlan
 * device in the adapter's vlan group and clear the flags there.
 */
static int igb_set_tso(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
#ifndef HAVE_NETDEV_VLAN_FEATURES
	int i;
	struct net_device *v_netdev;
#endif

	if (data) {
		netdev->features |= NETIF_F_TSO;
#ifdef NETIF_F_TSO6
		netdev->features |= NETIF_F_TSO6;
#endif
	} else {
		netdev->features &= ~NETIF_F_TSO;
#ifdef NETIF_F_TSO6
		netdev->features &= ~NETIF_F_TSO6;
#endif
#ifndef HAVE_NETDEV_VLAN_FEATURES
		/* disable TSO on all VLANs if they're present */
		if (!adapter->vlgrp)
			goto tso_out;
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			v_netdev = vlan_group_get_device(adapter->vlgrp, i);
			if (!v_netdev)
				continue;

			v_netdev->features &= ~NETIF_F_TSO;
#ifdef NETIF_F_TSO6
			v_netdev->features &= ~NETIF_F_TSO6;
#endif
			vlan_group_set_device(adapter->vlgrp, i, v_netdev);
		}
#endif /* HAVE_NETDEV_VLAN_FEATURES */
	}

#ifndef HAVE_NETDEV_VLAN_FEATURES
tso_out:
#endif /* HAVE_NETDEV_VLAN_FEATURES */
	dev_info(pci_dev_to_dev(adapter->pdev), "TSO is %s\n",
		 data ? "Enabled" : "Disabled");
	return 0;
}
2171
2172 #endif /* NETIF_F_TSO */
2173 #ifdef ETHTOOL_GFLAGS
/* igb_set_flags - legacy ethtool set_flags handler
 *
 * Validates and applies the supported ETH_FLAG_* bits (rx/tx vlan accel,
 * rxhash, and LRO when built in).  Tx vlan acceleration cannot be enabled
 * independently of rx, so it is cleared whenever rx vlan is off.  On
 * kernels without the old vlan_rx_register interface, an rx-vlan change
 * is propagated to the hardware via igb_vlan_mode().
 */
static int igb_set_flags(struct net_device *netdev, u32 data)
{
	u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
			      ETH_FLAG_RXHASH;
#ifndef HAVE_VLAN_RX_REGISTER
	/* snapshot which feature bits would change */
	u32 changed = netdev->features ^ data;
#endif
	int rc;
#ifndef IGB_NO_LRO

	supported_flags |= ETH_FLAG_LRO;
#endif
	/*
	 * Since there is no support for separate tx vlan accel
	 * enabled make sure tx flag is cleared if rx is.
	 */
	if (!(data & ETH_FLAG_RXVLAN))
		data &= ~ETH_FLAG_TXVLAN;

	rc = ethtool_op_set_flags(netdev, data, supported_flags);
	if (rc)
		return rc;
#ifndef HAVE_VLAN_RX_REGISTER

	/* push the new rx-vlan state to the hardware */
	if (changed & ETH_FLAG_RXVLAN)
		igb_vlan_mode(netdev, data);
#endif

	return 0;
}
2204
2205 #endif /* ETHTOOL_GFLAGS */
2206 #endif /* HAVE_NDO_SET_FEATURES */
2207 #ifdef ETHTOOL_SADV_COAL
2208 static int igb_set_adv_coal(struct net_device *netdev, struct ethtool_value *edata)
2209 {
2210         struct igb_adapter *adapter = netdev_priv(netdev);
2211
2212         switch (edata->data) {
2213         case IGB_DMAC_DISABLE:
2214                 adapter->dmac = edata->data;
2215                 break;
2216         case IGB_DMAC_MIN:
2217                 adapter->dmac = edata->data;
2218                 break;
2219         case IGB_DMAC_500:
2220                 adapter->dmac = edata->data;
2221                 break;
2222         case IGB_DMAC_EN_DEFAULT:
2223                 adapter->dmac = edata->data;
2224                 break;
2225         case IGB_DMAC_2000:
2226                 adapter->dmac = edata->data;
2227                 break;
2228         case IGB_DMAC_3000:
2229                 adapter->dmac = edata->data;
2230                 break;
2231         case IGB_DMAC_4000:
2232                 adapter->dmac = edata->data;
2233                 break;
2234         case IGB_DMAC_5000:
2235                 adapter->dmac = edata->data;
2236                 break;
2237         case IGB_DMAC_6000:
2238                 adapter->dmac = edata->data;
2239                 break;
2240         case IGB_DMAC_7000:
2241                 adapter->dmac = edata->data;
2242                 break;
2243         case IGB_DMAC_8000:
2244                 adapter->dmac = edata->data;
2245                 break;
2246         case IGB_DMAC_9000:
2247                 adapter->dmac = edata->data;
2248                 break;
2249         case IGB_DMAC_MAX:
2250                 adapter->dmac = edata->data;
2251                 break;
2252         default:
2253                 adapter->dmac = IGB_DMAC_DISABLE;
2254                 printk("set_dmac: invalid setting, setting DMAC to %d\n",
2255                         adapter->dmac);
2256         }
2257         printk("%s: setting DMAC to %d\n", netdev->name, adapter->dmac);
2258         return 0;
2259 }
2260 #endif /* ETHTOOL_SADV_COAL */
2261 #ifdef ETHTOOL_GADV_COAL
2262 static void igb_get_dmac(struct net_device *netdev,
2263                             struct ethtool_value *edata)
2264 {
2265         struct igb_adapter *adapter = netdev_priv(netdev);
2266         edata->data = adapter->dmac;
2267         
2268         return;
2269 }
2270 #endif
/* ethtool operations table for the igb driver; entries are conditionally
 * compiled to match the capabilities of the target kernel.
 * NOTE(review): the advcoal entries reference igb_get_adv_coal and
 * igb_set_dmac_coal, but this file defines igb_get_dmac and
 * igb_set_adv_coal — if ETHTOOL_GADV_COAL is ever defined this will not
 * link unless those symbols exist elsewhere; confirm the intended names. */
static struct ethtool_ops igb_ethtool_ops = {
	.get_settings           = igb_get_settings,
	.set_settings           = igb_set_settings,
	.get_drvinfo            = igb_get_drvinfo,
	.get_regs_len           = igb_get_regs_len,
	.get_regs               = igb_get_regs,
	.get_wol                = igb_get_wol,
	.set_wol                = igb_set_wol,
	.get_msglevel           = igb_get_msglevel,
	.set_msglevel           = igb_set_msglevel,
	.nway_reset             = igb_nway_reset,
	.get_link               = igb_get_link,
	.get_eeprom_len         = igb_get_eeprom_len,
	.get_eeprom             = igb_get_eeprom,
	.set_eeprom             = igb_set_eeprom,
	.get_ringparam          = igb_get_ringparam,
	.set_ringparam          = igb_set_ringparam,
	.get_pauseparam         = igb_get_pauseparam,
	.set_pauseparam         = igb_set_pauseparam,
	.self_test              = igb_diag_test,
	.get_strings            = igb_get_strings,
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
	.set_phys_id            = igb_set_phys_id,
#else
	.phys_id                = igb_phys_id,
#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
	.get_sset_count         = igb_get_sset_count,
#else
	/* legacy per-set count callbacks for older kernels */
	.get_stats_count        = igb_get_stats_count,
	.self_test_count        = igb_diag_test_count,
#endif
	.get_ethtool_stats      = igb_get_ethtool_stats,
#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
	.get_perm_addr          = ethtool_op_get_perm_addr,
#endif
	.get_coalesce           = igb_get_coalesce,
	.set_coalesce           = igb_set_coalesce,
#ifdef CONFIG_PM_RUNTIME
	/* keep the device powered for the duration of each ethtool op */
	.begin                  = igb_ethtool_begin,
	.complete               = igb_ethtool_complete,
#endif /* CONFIG_PM_RUNTIME */
#ifndef HAVE_NDO_SET_FEATURES
	/* legacy feature toggles, superseded by ndo_set_features */
	.get_rx_csum            = igb_get_rx_csum,
	.set_rx_csum            = igb_set_rx_csum,
	.get_tx_csum            = igb_get_tx_csum,
	.set_tx_csum            = igb_set_tx_csum,
	.get_sg                 = ethtool_op_get_sg,
	.set_sg                 = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
	.get_tso                = ethtool_op_get_tso,
	.set_tso                = igb_set_tso,
#endif
#ifdef ETHTOOL_GFLAGS
	.get_flags              = ethtool_op_get_flags,
	.set_flags              = igb_set_flags,
#endif /* ETHTOOL_GFLAGS */
#endif /* HAVE_NDO_SET_FEATURES */
#ifdef ETHTOOL_GADV_COAL
	.get_advcoal            = igb_get_adv_coal,
	.set_advcoal            = igb_set_dmac_coal
#endif /* ETHTOOL_GADV_COAL */
};
2334
/* igb_set_ethtool_ops - attach the igb ethtool operations to a netdev */
void igb_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
}
2339
2340 #endif  /* SIOCETHTOOL */