dpdk.git: lib/librte_pmd_e1000/igb_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "e1000/e1000_api.h"
#include "e1000_ethdev.h"

static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *rte_stats);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
                                                        void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr,
                uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *rte_stats);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta *reta_conf);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta *reta_conf);

/*
 * Macro to update VF statistics from registers that are not
 * "cleared on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
        u32 latest = E1000_READ_REG(hw, reg);     \
        cur += latest - last;                     \
        last = latest;                            \
}
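/*
 * Note: the (latest - last) subtraction above is performed on u32 values,
 * so the accumulated delta remains correct even when the 32-bit hardware
 * counter wraps around between two reads.
 */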

#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_igb_map[] = {

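/*
 * Each supported device ID is declared in rte_pci_dev_ids.h: the
 * RTE_PCI_DEV_ID_DECL_IGB macro (and RTE_PCI_DEV_ID_DECL_IGBVF below)
 * expands to one table entry per device, so these tables are generated
 * at preprocessing time.
 */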
#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{.device_id = 0},
};

/*
 * The set of PCI devices this driver supports (for 82576 & I350 VF)
 */
static struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{.device_id = 0},
};

static struct eth_dev_ops eth_igb_ops = {
        .dev_configure        = eth_igb_configure,
        .dev_start            = eth_igb_start,
        .dev_stop             = eth_igb_stop,
        .dev_close            = eth_igb_close,
        .promiscuous_enable   = eth_igb_promiscuous_enable,
        .promiscuous_disable  = eth_igb_promiscuous_disable,
        .allmulticast_enable  = eth_igb_allmulticast_enable,
        .allmulticast_disable = eth_igb_allmulticast_disable,
        .link_update          = eth_igb_link_update,
        .stats_get            = eth_igb_stats_get,
        .stats_reset          = eth_igb_stats_reset,
        .dev_infos_get        = eth_igb_infos_get,
        .vlan_filter_set      = eth_igb_vlan_filter_set,
        .vlan_tpid_set        = eth_igb_vlan_tpid_set,
        .vlan_offload_set     = eth_igb_vlan_offload_set,
        .rx_queue_setup       = eth_igb_rx_queue_setup,
        .rx_queue_release     = eth_igb_rx_queue_release,
        .rx_queue_count       = eth_igb_rx_queue_count,
        .rx_descriptor_done   = eth_igb_rx_descriptor_done,
        .tx_queue_setup       = eth_igb_tx_queue_setup,
        .tx_queue_release     = eth_igb_tx_queue_release,
        .dev_led_on           = eth_igb_led_on,
        .dev_led_off          = eth_igb_led_off,
        .flow_ctrl_set        = eth_igb_flow_ctrl_set,
        .mac_addr_add         = eth_igb_rar_set,
        .mac_addr_remove      = eth_igb_rar_clear,
        .reta_update          = eth_igb_rss_reta_update,
        .reta_query           = eth_igb_rss_reta_query,
        .rss_hash_update      = eth_igb_rss_hash_update,
};

/*
 * dev_ops for the virtual function; only the bare necessities for basic
 * VF operation are implemented.
 */
static struct eth_dev_ops igbvf_eth_dev_ops = {
        .dev_configure        = igbvf_dev_configure,
        .dev_start            = igbvf_dev_start,
        .dev_stop             = igbvf_dev_stop,
        .dev_close            = igbvf_dev_close,
        .link_update          = eth_igb_link_update,
        .stats_get            = eth_igbvf_stats_get,
        .stats_reset          = eth_igbvf_stats_reset,
        .vlan_filter_set      = igbvf_vlan_filter_set,
        .dev_infos_get        = eth_igb_infos_get,
        .rx_queue_setup       = eth_igb_rx_queue_setup,
        .rx_queue_release     = eth_igb_rx_queue_release,
        .tx_queue_setup       = eth_igb_tx_queue_setup,
        .tx_queue_release     = eth_igb_tx_queue_release,
};

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer where the link status is saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

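        /*
         * rte_atomic64_cmpset() copies the 64-bit link word from src into
         * dst only if dst still holds the value just read from it; a zero
         * return means a concurrent writer raced with us, so the snapshot
         * is not consistent and we report failure.
         */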
        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}


static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
        E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
        E1000_WRITE_REG(hw, E1000_IMC, ~0);
        E1000_WRITE_FLUSH(hw);
}

static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = e1000_reset_hw(hw);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mailbox ops can work */
        ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
        E1000_WRITE_FLUSH(hw);

        return status;
}

static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        hw->vendor_id = dev->pci_dev->id.vendor_id;
        hw->device_id = dev->pci_dev->id.device_id;
        hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;

        e1000_set_mac_type(hw);

        /* need to check if it is a vf device below */
}

static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
        int ret_val;

        /*
         * Do mac ops initialization manually here, since we will need
         * some function pointers set by this call.
         */
        ret_val = e1000_init_mac_params(hw);
        if (ret_val)
                return ret_val;

        /*
         * Taking the SMBI lock should not fail at this early stage; if it
         * does, the application previously exited without releasing it.
         * So force the release of the stale lock.
         */
        if (e1000_get_hw_semaphore_generic(hw) < 0) {
                DEBUGOUT("SMBI lock released");
        }
        e1000_put_hw_semaphore_generic(hw);

        if (hw->mac.ops.acquire_swfw_sync != NULL) {
                uint16_t mask;

                /*
                 * Taking the PHY lock should not fail at this early stage
                 * either; if it does, the application previously exited
                 * without releasing it. So force the release of the stale
                 * lock.
                 */
                mask = E1000_SWFW_PHY0_SM << hw->bus.func;
                if (hw->bus.func > E1000_FUNC_1)
                        mask <<= 2;
                if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
                        DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
                }
                hw->mac.ops.release_swfw_sync(hw, mask);

                /*
                 * This one is trickier since it is common to all ports; but
                 * swfw_sync retries long enough (1s) to be almost sure that,
                 * if the lock cannot be taken, it is because the semaphore
                 * was left improperly locked.
                 */
                mask = E1000_SWFW_EEP_SM;
                if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
                        DEBUGOUT("SWFW common locks released");
                }
                hw->mac.ops.release_swfw_sync(hw, mask);
        }

        return E1000_SUCCESS;
}

static int
eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                   struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev;
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct e1000_vfta * shadow_vfta =
                        E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        uint32_t ctrl_ext;

        pci_dev = eth_dev->pci_dev;
        eth_dev->dev_ops = &eth_igb_ops;
        eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
        eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

        /* For secondary processes, we don't initialise any further as the
         * primary has already done this work. Only check whether we need
         * a different RX function. */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
                return 0;
        }

        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        igb_identify_hardware(eth_dev);
        if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
        }

        e1000_get_bus_info(hw);

        /* Reset any pending lock */
        if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
        }

        /* Finish initialization */
        if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
        }

        hw->mac.autoneg = 1;
        hw->phy.autoneg_wait_to_complete = 0;
        hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
                hw->phy.mdix = 0; /* AUTO_ALL_MODES */
                hw->phy.disable_polarity_correction = 0;
                hw->phy.ms_type = e1000_ms_hw_default;
        }

        /*
         * Start from a known state; this is important for reading the
         * NVM and MAC address afterwards.
         */
        igb_pf_reset_hw(hw);

        /* Make sure we have a good EEPROM before we read from it */
        if (e1000_validate_nvm_checksum(hw) < 0) {
                /*
                 * Some PCI-E parts fail the first check due to
                 * the link being in sleep state. Call it again;
                 * if it fails a second time, it is a real issue.
                 */
                if (e1000_validate_nvm_checksum(hw) < 0) {
                        PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
                        error = -EIO;
                        goto err_late;
                }
        }

        /* Read the permanent MAC address out of the EEPROM */
        if (e1000_read_mac_addr(hw) != 0) {
                PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
                error = -EIO;
                goto err_late;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("e1000",
                ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                                                "store MAC addresses",
                                ETHER_ADDR_LEN * hw->mac.rar_entry_count);
                error = -ENOMEM;
                goto err_late;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* Now initialize the hardware */
        if (igb_hardware_init(hw) != 0) {
                PMD_INIT_LOG(ERR, "Hardware initialization failed");
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                error = -ENODEV;
                goto err_late;
        }
        hw->mac.get_link_status = 1;

        /* Indicate SOL/IDER usage */
        if (e1000_check_reset_block(hw) < 0) {
                PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
                                        "SOL/IDER session");
        }

        /* initialize PF if max_vfs not zero */
        igb_pf_host_init(eth_dev);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mailbox ops can work */
        ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
        E1000_WRITE_FLUSH(hw);

        PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(&(pci_dev->intr_handle),
                eth_igb_interrupt_handler, (void *)eth_dev);

        /* enable uio intr after callback register */
        rte_intr_enable(&(pci_dev->intr_handle));

        /* enable support intr */
        igb_intr_enable(eth_dev);

        return 0;

err_late:
        igb_hw_control_release(hw);

        return (error);
}

/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        int diag;

        PMD_INIT_LOG(DEBUG, "eth_igbvf_dev_init");

        eth_dev->dev_ops = &igbvf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
        eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

        /* For secondary processes, we don't initialise any further as the
         * primary has already done this work. Only check whether we need
         * a different RX function. */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
                return 0;
        }

        pci_dev = eth_dev->pci_dev;

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Initialize the shared code */
        diag = e1000_setup_init_funcs(hw, TRUE);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
                        diag);
                return -EIO;
        }

        /* init_mailbox_params */
        hw->mbx.ops.init_params(hw);

        /* Disable the interrupts for VF */
        igbvf_intr_disable(hw);

        diag = hw->mac.ops.reset_hw(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
                hw->mac.rar_entry_count, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC "
                        "addresses",
                        ETHER_ADDR_LEN * hw->mac.rar_entry_count);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x "
                        "mac.type=%s\n",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id,
                        "igb_mac_82576_vf");

        return 0;
}

static struct eth_driver rte_igb_pmd = {
        {
                .name = "rte_igb_pmd",
                .id_table = pci_id_igb_map,
                .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
        },
        .eth_dev_init = eth_igb_dev_init,
        .dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
        {
                .name = "rte_igbvf_pmd",
                .id_table = pci_id_igbvf_map,
                .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
        },
        .eth_dev_init = eth_igbvf_dev_init,
        .dev_private_size = sizeof(struct e1000_adapter),
};

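/*
 * PF Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers the Poll Mode Driver for physical IGB devices.
 */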
static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
        rte_eth_driver_register(&rte_igb_pmd);
        return 0;
}


static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* RCTL: enable VLAN filter since VMDq always uses VLAN filter */
        uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl |= E1000_RCTL_VFE;
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Virtual Poll Mode] Driver for PCI IGB devices.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
        DEBUGFUNC("rte_igbvf_pmd_init");

        rte_eth_driver_register(&rte_igbvf_pmd);
        return (0);
}

static int
eth_igb_configure(struct rte_eth_dev *dev)
{
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        PMD_INIT_LOG(DEBUG, ">>");

        intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;

        PMD_INIT_LOG(DEBUG, "<<");

        return (0);
}

static int
eth_igb_start(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret, i, mask;
        uint32_t ctrl_ext;

        PMD_INIT_LOG(DEBUG, ">>");

        /* Power up the phy. Needed to make the link go up */
        e1000_power_up_phy(hw);

        /*
         * Packet Buffer Allocation (PBA)
         * Writing PBA sets the receive portion of the buffer;
         * the remainder is used for the transmit buffer.
         */
        if (hw->mac.type == e1000_82575) {
                uint32_t pba;

                pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
                E1000_WRITE_REG(hw, E1000_PBA, pba);
        }

        /* Put the address into the Receive Address Array */
        e1000_rar_set(hw, hw->mac.addr, 0);

        /* Initialize the hardware */
        if (igb_hardware_init(hw)) {
                PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
                return (-EIO);
        }

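        /*
         * Program 0x8100 (ETHER_TYPE_VLAN) into both halves of the VLAN
         * Ether Type (VET) register; presumably the low and high 16-bit
         * fields cover the inner and the extended/outer VLAN ethertype
         * used for double VLAN (QinQ).
         */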
        E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mailbox ops can work */
        ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
        E1000_WRITE_FLUSH(hw);

        /* configure PF module if SRIOV enabled */
        igb_pf_host_configure(dev);

        /* Configure for OS presence */
        igb_init_manageability(hw);

        eth_igb_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        ret = eth_igb_rx_init(dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
                igb_dev_clear_queues(dev);
                return ret;
        }

        e1000_clear_hw_cntrs_base_generic(hw);

        /*
         * VLAN Offload Settings
         */
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                        ETH_VLAN_EXTEND_MASK;
        eth_igb_vlan_offload_set(dev, mask);

        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable VLAN filter since VMDq always uses VLAN filter */
                igb_vmdq_vlan_hw_filter_enable(dev);
        }

        /*
         * Configure the Interrupt Moderation register (EITR) with the
         * maximum possible value (0xFFFF) to minimize "System Partial
         * Write" transactions issued by spurious [DMA] memory updates of
         * RX and TX ring descriptors.
         *
         * With an EITR granularity of 2 microseconds on the 82576, only
         * 7 to 8 spurious memory updates per second should be expected:
         * (65535 * 2) / 1,000,000 ~= 0.131 second between updates.
         *
         * Because interrupts are not used at all, the MSI-X is not
         * activated and interrupt moderation is controlled by EITR[0].
         *
         * Note that having [almost] disabled memory updates of RX and TX
         * ring descriptors through the Interrupt Moderation mechanism,
         * memory updates of ring descriptors are now moderated by the
         * configurable value of the Write-Back Threshold registers.
         */
        if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
                (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210)) {
                uint32_t ivar;

                /* Enable all RX & TX queues in the IVAR registers */
                ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
                for (i = 0; i < 8; i++)
                        E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);

                /* Configure EITR with the maximum possible value (0xFFFF) */
                E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
        }

        /* Setup link speed and duplex */
        switch (dev->data->dev_conf.link_speed) {
        case ETH_LINK_SPEED_AUTONEG:
                if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_10:
                if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_100:
                if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_1000:
                if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
                                (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
                        hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_10000:
        default:
                goto error_invalid_config;
        }
        e1000_setup_link(hw);

        /* check if lsc interrupt feature is enabled */
        if (dev->data->dev_conf.intr_conf.lsc != 0)
                ret = eth_igb_lsc_interrupt_setup(dev);

        /* resume enabled intr since hw reset */
        igb_intr_enable(dev);

        PMD_INIT_LOG(DEBUG, "<<");

        return (0);

error_invalid_config:
        PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
                        dev->data->dev_conf.link_speed,
                        dev->data->dev_conf.link_duplex, dev->data->port_id);
        igb_dev_clear_queues(dev);
        return (-EINVAL);
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link;

        igb_intr_disable(hw);
        igb_pf_reset_hw(hw);
        E1000_WRITE_REG(hw, E1000_WUC, 0);

        /* Set bit for Go Link disconnect */
        if (hw->mac.type >= e1000_82580) {
                uint32_t phpm_reg;

                phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
                phpm_reg |= E1000_82580_PM_GO_LINKD;
                E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
        }

        /* Power down the phy. Needed to make the link go down */
        e1000_power_down_phy(hw);

        igb_dev_clear_queues(dev);

        /* clear the recorded link status */
        memset(&link, 0, sizeof(link));
        rte_igb_dev_atomic_write_link_status(dev, &link);
}

static void
eth_igb_close(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link;

        eth_igb_stop(dev);
        e1000_phy_hw_reset(hw);
        igb_release_manageability(hw);
        igb_hw_control_release(hw);

        /* Clear bit for Go Link disconnect */
        if (hw->mac.type >= e1000_82580) {
                uint32_t phpm_reg;

                phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
                phpm_reg &= ~E1000_82580_PM_GO_LINKD;
                E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
        }

        igb_dev_clear_queues(dev);

        memset(&link, 0, sizeof(link));
        rte_igb_dev_atomic_write_link_status(dev, &link);
}

static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
        uint32_t rx_buf_size;
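
        /*
         * The packet-buffer size registers report the size in KB; the
         * << 10 shifts below convert it to bytes.
         */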
        if (hw->mac.type == e1000_82576) {
                rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
        } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
                /* PBS needs to be translated according to a lookup table */
                rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
                rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
                rx_buf_size = (rx_buf_size << 10);
        } else if (hw->mac.type == e1000_i210) {
                rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
        } else {
                rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
        }

        return rx_buf_size;
}

/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
        uint32_t rx_buf_size;
        int diag;

        /* Let the firmware know the OS is in control */
        igb_hw_control_acquire(hw);

        /*
         * These parameters control the automatic generation (Tx) and
         * response (Rx) to Ethernet PAUSE frames.
         * - High water mark should allow for at least two standard size (1518)
         *   frames to be received after sending an XOFF.
         * - Low water mark works best when it is very near the high water mark.
         *   This allows the receiver to restart by sending XON when it has
         *   drained a bit. Here we use an arbitrary value of 1500 which will
         *   restart after one full frame is pulled from the buffer. There
         *   could be several smaller frames in the buffer and if so they will
         *   not trigger the XON until their total number reduces the buffer
         *   by 1500.
         * - The pause time is fairly large at 1000 x 512ns = 512 usec.
         */
        rx_buf_size = igb_get_rx_buffer_size(hw);

        hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
        hw->fc.low_water = hw->fc.high_water - 1500;
        hw->fc.pause_time = IGB_FC_PAUSE_TIME;
        hw->fc.send_xon = 1;
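        /*
         * Worked example (hypothetical buffer size): if
         * igb_get_rx_buffer_size() returned 0x10000 (a 64 KB Rx buffer),
         * then high_water = 65536 - (1518 * 2) = 62500 bytes and
         * low_water = 62500 - 1500 = 61000 bytes, leaving room for two
         * full-size frames after an XOFF is sent.
         */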

        /* Set Flow control, use the tunable location if sane */
        if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
                hw->fc.requested_mode = igb_fc_setting;
        else
                hw->fc.requested_mode = e1000_fc_none;

        /* Issue a global reset */
        igb_pf_reset_hw(hw);
        E1000_WRITE_REG(hw, E1000_WUC, 0);

        diag = e1000_init_hw(hw);
        if (diag < 0)
                return (diag);

        E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
        e1000_get_phy_info(hw);
        e1000_check_for_link(hw);

        return (0);
}

/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_hw_stats *stats =
                        E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int pause_frames;

        if (hw->phy.media_type == e1000_media_type_copper ||
            (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
                stats->symerrs +=
                    E1000_READ_REG(hw, E1000_SYMERRS);
                stats->sec += E1000_READ_REG(hw, E1000_SEC);
        }

        stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
        stats->mpc += E1000_READ_REG(hw, E1000_MPC);
        stats->scc += E1000_READ_REG(hw, E1000_SCC);
        stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

        stats->mcc += E1000_READ_REG(hw, E1000_MCC);
        stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
        stats->colc += E1000_READ_REG(hw, E1000_COLC);
        stats->dc += E1000_READ_REG(hw, E1000_DC);
        stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
        stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
        stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
        /*
         * For watchdog management we need to know if we have been
         * paused during the last interval, so capture that here.
         */
        pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
        stats->xoffrxc += pause_frames;
        stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
        stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
        stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
        stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
        stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
        stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
        stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
        stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
        stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
        stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
        stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
        stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

        /* For the 64-bit byte counters the low dword must be read first. */
        /* Both registers clear on the read of the high dword */

        stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
        stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
        stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
        stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

        stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
        stats->ruc += E1000_READ_REG(hw, E1000_RUC);
        stats->rfc += E1000_READ_REG(hw, E1000_RFC);
        stats->roc += E1000_READ_REG(hw, E1000_ROC);
        stats->rjc += E1000_READ_REG(hw, E1000_RJC);

        stats->tor += E1000_READ_REG(hw, E1000_TORH);
        stats->tot += E1000_READ_REG(hw, E1000_TOTH);

        stats->tpr += E1000_READ_REG(hw, E1000_TPR);
        stats->tpt += E1000_READ_REG(hw, E1000_TPT);
        stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
        stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
        stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
        stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
        stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
        stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
        stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
        stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

        /* Interrupt Counts */

        stats->iac += E1000_READ_REG(hw, E1000_IAC);
        stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
        stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
        stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
        stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
        stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
        stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
        stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
        stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

        /* Host to Card Statistics */

        stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
        stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
        stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
        stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
        stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
        stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
        stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
        stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
        stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
        stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
        stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
        stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
        stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
        stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

        stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
        stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
        stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
        stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
        stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
        stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

        if (rte_stats == NULL)
                return;

        /* Rx Errors */
        rte_stats->ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
            stats->ruc + stats->roc + stats->mpc + stats->cexterr;

        /* Tx Errors */
        rte_stats->oerrors = stats->ecol + stats->latecol;

        /* XON/XOFF pause frames */
        rte_stats->tx_pause_xon  = stats->xontxc;
        rte_stats->rx_pause_xon  = stats->xonrxc;
        rte_stats->tx_pause_xoff = stats->xofftxc;
        rte_stats->rx_pause_xoff = stats->xoffrxc;

        rte_stats->ipackets = stats->gprc;
        rte_stats->opackets = stats->gptc;
        rte_stats->ibytes   = stats->gorc;
        rte_stats->obytes   = stats->gotc;
}

static void
eth_igb_stats_reset(struct rte_eth_dev *dev)
{
        struct e1000_hw_stats *hw_stats =
                        E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* HW registers are cleared on read */
        eth_igb_stats_get(dev, NULL);

        /* Reset software totals */
        memset(hw_stats, 0, sizeof(*hw_stats));
}

static void
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
                          E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* Good Rx packets, including VF loopback */
        UPDATE_VF_STAT(E1000_VFGPRC,
            hw_stats->last_gprc, hw_stats->gprc);

        /* Good Rx octets, including VF loopback */
        UPDATE_VF_STAT(E1000_VFGORC,
            hw_stats->last_gorc, hw_stats->gorc);

        /* Good Tx packets, including VF loopback */
        UPDATE_VF_STAT(E1000_VFGPTC,
            hw_stats->last_gptc, hw_stats->gptc);

        /* Good Tx octets, including VF loopback */
        UPDATE_VF_STAT(E1000_VFGOTC,
            hw_stats->last_gotc, hw_stats->gotc);

        /* Rx Multicast packets */
        UPDATE_VF_STAT(E1000_VFMPRC,
            hw_stats->last_mprc, hw_stats->mprc);

        /* Good Rx loopback packets */
        UPDATE_VF_STAT(E1000_VFGPRLBC,
            hw_stats->last_gprlbc, hw_stats->gprlbc);

        /* Good Rx loopback octets */
        UPDATE_VF_STAT(E1000_VFGORLBC,
            hw_stats->last_gorlbc, hw_stats->gorlbc);

        /* Good Tx loopback packets */
        UPDATE_VF_STAT(E1000_VFGPTLBC,
            hw_stats->last_gptlbc, hw_stats->gptlbc);

        /* Good Tx loopback octets */
        UPDATE_VF_STAT(E1000_VFGOTLBC,
            hw_stats->last_gotlbc, hw_stats->gotlbc);

        if (rte_stats == NULL)
                return;

        memset(rte_stats, 0, sizeof(*rte_stats));
        rte_stats->ipackets = hw_stats->gprc;
        rte_stats->ibytes = hw_stats->gorc;
        rte_stats->opackets = hw_stats->gptc;
        rte_stats->obytes = hw_stats->gotc;
        rte_stats->imcasts = hw_stats->mprc;
        rte_stats->ilbpackets = hw_stats->gprlbc;
        rte_stats->ilbbytes = hw_stats->gorlbc;
        rte_stats->olbpackets = hw_stats->gptlbc;
        rte_stats->olbbytes = hw_stats->gotlbc;
}

static void
eth_igbvf_stats_reset(struct rte_eth_dev *dev)
{
        struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
                        E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* Sync HW registers to the last stats */
        eth_igbvf_stats_get(dev, NULL);

        /*
         * Reset the accumulated counters: only the fields from 'gprc' to
         * the end of the struct are cleared; fields that precede it are
         * preserved.
         */
        memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
               offsetof(struct e1000_vf_stats, gprc));
}

static void
eth_igb_infos_get(struct rte_eth_dev *dev,
                    struct rte_eth_dev_info *dev_info)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
        dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
        dev_info->max_mac_addrs = hw->mac.rar_entry_count;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM   |
                DEV_TX_OFFLOAD_SCTP_CKSUM;

        switch (hw->mac.type) {
        case e1000_82575:
                dev_info->max_rx_queues = 4;
                dev_info->max_tx_queues = 4;
                dev_info->max_vmdq_pools = 0;
                break;

        case e1000_82576:
                dev_info->max_rx_queues = 16;
                dev_info->max_tx_queues = 16;
                dev_info->max_vmdq_pools = ETH_8_POOLS;
                break;

        case e1000_82580:
                dev_info->max_rx_queues = 8;
                dev_info->max_tx_queues = 8;
                dev_info->max_vmdq_pools = ETH_8_POOLS;
                break;

        case e1000_i350:
                dev_info->max_rx_queues = 8;
                dev_info->max_tx_queues = 8;
                dev_info->max_vmdq_pools = ETH_8_POOLS;
                break;

        case e1000_i354:
                dev_info->max_rx_queues = 8;
                dev_info->max_tx_queues = 8;
                break;

        case e1000_i210:
                dev_info->max_rx_queues = 4;
                dev_info->max_tx_queues = 4;
                dev_info->max_vmdq_pools = 0;
                break;

        case e1000_vfadapt:
                dev_info->max_rx_queues = 2;
                dev_info->max_tx_queues = 2;
                dev_info->max_vmdq_pools = 0;
                break;

        case e1000_vfadapt_i350:
                dev_info->max_rx_queues = 1;
                dev_info->max_tx_queues = 1;
                dev_info->max_vmdq_pools = 0;
                break;

        default:
                /* Should not happen */
                dev_info->max_rx_queues = 0;
                dev_info->max_tx_queues = 0;
                dev_info->max_vmdq_pools = 0;
        }
}

/* returns 0 if the link status changed, -1 if it did not change */
static int
eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link, old;
        int link_check, count;

        link_check = 0;
        hw->mac.get_link_status = 1;

        /* possible wait-to-complete in up to 9 seconds */
        for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
                /* Read the real link status */
                switch (hw->phy.media_type) {
                case e1000_media_type_copper:
                        /* Do the work to read phy */
                        e1000_check_for_link(hw);
                        link_check = !hw->mac.get_link_status;
                        break;

                case e1000_media_type_fiber:
                        e1000_check_for_link(hw);
                        link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                      E1000_STATUS_LU);
                        break;

                case e1000_media_type_internal_serdes:
                        e1000_check_for_link(hw);
                        link_check = hw->mac.serdes_has_link;
                        break;

                /* VF device is type_unknown */
                case e1000_media_type_unknown:
                        eth_igbvf_link_update(hw);
                        link_check = !hw->mac.get_link_status;
                        break;

                default:
                        break;
                }
                if (link_check || wait_to_complete == 0)
                        break;
                rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
        }
        memset(&link, 0, sizeof(link));
        rte_igb_dev_atomic_read_link_status(dev, &link);
        old = link;

        /* Now we check if a transition has happened */
        if (link_check) {
                hw->mac.ops.get_link_up_info(hw, &link.link_speed,
                                          &link.link_duplex);
                link.link_status = 1;
        } else {
                link.link_speed = 0;
                link.link_duplex = 0;
                link.link_status = 0;
        }
        rte_igb_dev_atomic_write_link_status(dev, &link);

        /* not changed */
        if (old.link_status == link.link_status)
                return -1;

        /* changed */
        return 0;
}

/*
 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igb_hw_control_acquire(struct e1000_hw *hw)
{
        uint32_t ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_hw_control_release(struct e1000_hw *hw)
{
        uint32_t ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        E1000_WRITE_REG(hw, E1000_CTRL_EXT,
                        ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * Bit of a misnomer: what this really means is to enable OS management
 * of the system... aka to disable special hardware management features.
 */
static void
igb_init_manageability(struct e1000_hw *hw)
{
        if (e1000_enable_mng_pass_thru(hw)) {
                uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
                uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

                /* disable hardware interception of ARP */
                manc &= ~(E1000_MANC_ARP_EN);

                /* enable receiving management packets to the host */
                manc |= E1000_MANC_EN_MNG2HOST;
                manc2h |= 1 << 5;  /* Mng Port 623 */
                manc2h |= 1 << 6;  /* Mng Port 664 */
                E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
                E1000_WRITE_REG(hw, E1000_MANC, manc);
        }
}

static void
igb_release_manageability(struct e1000_hw *hw)
{
        if (e1000_enable_mng_pass_thru(hw)) {
                uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

                manc |= E1000_MANC_ARP_EN;
                manc &= ~E1000_MANC_EN_MNG2HOST;

                E1000_WRITE_REG(hw, E1000_MANC, manc);
        }
}

static void
eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

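/*
 * Illustrative usage sketch (hypothetical IGB_DOC_EXAMPLES guard, never
 * defined): the four RCTL helpers above are reached through these generic
 * ethdev calls. Note the interplay encoded above: leaving promiscuous mode
 * keeps MPE set while all_multicast is on, and all-multicast cannot be
 * disabled while promiscuous mode is active.
 */
#ifdef IGB_DOC_EXAMPLES
static void
example_rx_filter_modes(uint8_t port_id)
{
	rte_eth_promiscuous_enable(port_id);   /* sets UPE | MPE */
	rte_eth_promiscuous_disable(port_id);  /* clears UPE, keeps MPE if all_multicast */
	rte_eth_allmulticast_enable(port_id);  /* sets MPE */
	rte_eth_allmulticast_disable(port_id); /* no-op while promiscuous */
}
#endif /* IGB_DOC_EXAMPLES */
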
static int
eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			      E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

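/*
 * Worked example of the VFTA indexing above (illustrative only, hypothetical
 * IGB_DOC_EXAMPLES guard): the 4096 possible VLAN IDs map onto 128 32-bit
 * VFTA registers, so vlan_id 100 lands in register 100 >> 5 = 3 at bit
 * 100 & 0x1F = 4.
 */
#ifdef IGB_DOC_EXAMPLES
static int
example_vfta_position(uint16_t vlan_id, uint32_t *idx, uint32_t *bit)
{
	if (vlan_id > ETH_VLAN_ID_MAX)
		return -EINVAL;
	*idx = (vlan_id >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
	*bit = 1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
	return 0;
}
#endif /* IGB_DOC_EXAMPLES */
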
static void
eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg = ETHER_TYPE_VLAN;

	reg |= (tpid << 16);
	E1000_WRITE_REG(hw, E1000_VET, reg);
}

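/*
 * Register layout sketch (illustrative, hypothetical IGB_DOC_EXAMPLES
 * guard): the write above places the standard C-tag ethertype
 * (ETHER_TYPE_VLAN, 0x8100) in VET bits 15:0 and the caller-supplied TPID
 * in bits 31:16, the outer-tag ethertype used in extended (QinQ) mode.
 */
#ifdef IGB_DOC_EXAMPLES
static uint32_t
example_vet_value(uint16_t outer_tpid)
{
	/* e.g. outer_tpid = 0x88A8 yields 0x88A88100 */
	return (uint32_t)ETHER_TYPE_VLAN | ((uint32_t)outer_tpid << 16);
}
#endif /* IGB_DOC_EXAMPLES */
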
static void
igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore VFTA table */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}

static void
igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						VLAN_TAG_SIZE);
}

static void
igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg |= E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE);
}

static void
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			igb_vlan_hw_strip_enable(dev);
		else
			igb_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			igb_vlan_hw_filter_enable(dev);
		else
			igb_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			igb_vlan_hw_extend_enable(dev);
		else
			igb_vlan_hw_extend_disable(dev);
	}
}

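/*
 * Illustrative usage sketch (hypothetical IGB_DOC_EXAMPLES guard): the mask
 * handled above is derived by the ethdev layer from the application's
 * requested offload state. Requesting strip + filter on (and extend off)
 * updates dev_conf.rxmode and invokes eth_igb_vlan_offload_set() with the
 * bits that changed.
 */
#ifdef IGB_DOC_EXAMPLES
static void
example_set_vlan_offloads(uint8_t port_id)
{
	rte_eth_dev_set_vlan_offload(port_id,
			ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
}
#endif /* IGB_DOC_EXAMPLES */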

/**
 * Enable the link status change interrupt by adding LSC to the
 * interrupt mask.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= E1000_ICR_LSC;

	return 0;
}

/*
 * Read the ICR register to get the interrupt causes, and set flags for
 * deferred handling (link status update, VF mailbox).
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	igb_intr_disable(hw);

	/* read-on-clear nic registers here */
	icr = E1000_READ_REG(hw, E1000_ICR);

	intr->flags = 0;
	if (icr & E1000_ICR_LSC)
		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;

	if (icr & E1000_ICR_VMMB)
		intr->flags |= E1000_FLAG_MAILBOX;

	return 0;
}

/*
 * It executes link_update after knowing an interrupt is present.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_action(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	uint32_t tctl, rctl;
	struct rte_eth_link link;
	int ret;

	if (intr->flags & E1000_FLAG_MAILBOX) {
		igb_pf_mbx_process(dev);
		intr->flags &= ~E1000_FLAG_MAILBOX;
	}

	igb_intr_enable(dev);
	rte_intr_enable(&(dev->pci_dev->intr_handle));

	if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		hw->mac.get_link_status = 1;
		ret = eth_igb_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return 0;

		memset(&link, 0, sizeof(link));
		rte_igb_dev_atomic_read_link_status(dev, &link);
		if (link.link_status) {
			PMD_INIT_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s\n",
				dev->data->port_id, (unsigned)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
		} else {
			PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
						dev->data->port_id);
		}
		PMD_INIT_LOG(INFO, "PCI Address: %04x:%02x:%02x.%x",
					dev->pci_dev->addr.domain,
					dev->pci_dev->addr.bus,
					dev->pci_dev->addr.devid,
					dev->pci_dev->addr.function);
		tctl = E1000_READ_REG(hw, E1000_TCTL);
		rctl = E1000_READ_REG(hw, E1000_RCTL);
		if (link.link_status) {
			/* enable Tx/Rx */
			tctl |= E1000_TCTL_EN;
			rctl |= E1000_RCTL_EN;
		} else {
			/* disable Tx/Rx */
			tctl &= ~E1000_TCTL_EN;
			rctl &= ~E1000_RCTL_EN;
		}
		E1000_WRITE_REG(hw, E1000_TCTL, tctl);
		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
	}

	return 0;
}

/**
 * Interrupt handler which shall be registered at first.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
							void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igb_interrupt_get_status(dev);
	eth_igb_interrupt_action(dev);
}

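/*
 * Illustrative usage sketch (hypothetical IGB_DOC_EXAMPLES guard): the LSC
 * event published by eth_igb_interrupt_action() reaches the application
 * through a callback registered with the ethdev layer; intr_conf.lsc must
 * be set to 1 in the rte_eth_conf used at configure time.
 */
#ifdef IGB_DOC_EXAMPLES
static void
example_lsc_callback(uint8_t port_id, enum rte_eth_event_type type,
		     void *param)
{
	(void)param;
	if (type == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link state changed\n", (unsigned)port_id);
}

static void
example_register_lsc(uint8_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      example_lsc_callback, NULL);
}
#endif /* IGB_DOC_EXAMPLES */
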
static int
eth_igb_led_on(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
}

static int
eth_igb_led_off(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
}

static int
eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rx_buf_size = igb_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x\n", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
		(fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value\n");
		PMD_INIT_LOG(ERR, "high water must be <= 0x%x\n", max_high_water);
		return (-EINVAL);
	}

	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time     = fc_conf->pause_time;
	hw->fc.high_water     = fc_conf->high_water;
	hw->fc.low_water      = fc_conf->low_water;
	hw->fc.send_xon       = fc_conf->send_xon;

	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {

		/* check if we want to forward MAC frames - the driver has no
		 * native capability to do that, so we write the registers
		 * ourselves */

		rctl = E1000_READ_REG(hw, E1000_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;

		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x\n", err);
	return (-EIO);
}

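/*
 * Illustrative usage sketch (hypothetical IGB_DOC_EXAMPLES guard): filling
 * in the rte_eth_fc_conf consumed above. The watermark and pause-time
 * values are placeholders; they must satisfy
 * low_water <= high_water <= rx_buf_size - ETHER_MAX_LEN or the function
 * returns -EINVAL.
 */
#ifdef IGB_DOC_EXAMPLES
static int
example_enable_flow_ctrl(uint8_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	fc_conf.mode = RTE_FC_FULL;     /* maps to e1000_fc_full above */
	fc_conf.high_water = 0x5000;    /* placeholder watermarks (bytes) */
	fc_conf.low_water = 0x3000;
	fc_conf.pause_time = 0x680;
	fc_conf.send_xon = 1;
	fc_conf.mac_ctrl_frame_fwd = 0; /* do not forward MAC control frames */

	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif /* IGB_DOC_EXAMPLES */
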
#define E1000_RAH_POOLSEL_SHIFT      (18)
static void
eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rah;

	e1000_rar_set(hw, mac_addr->addr_bytes, index);
	rah = E1000_READ_REG(hw, E1000_RAH(index));
	rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
	E1000_WRITE_REG(hw, E1000_RAH(index), rah);
}

static void
eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[ETHER_ADDR_LEN];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(addr, 0, sizeof(addr));

	e1000_rar_set(hw, addr, index);
}

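/*
 * Illustrative usage sketch (hypothetical IGB_DOC_EXAMPLES guard):
 * receive-address registers are managed through the generic MAC filtering
 * API, which lands in eth_igb_rar_set()/eth_igb_rar_clear(). The address
 * bytes are arbitrary example values.
 */
#ifdef IGB_DOC_EXAMPLES
static void
example_extra_mac_addr(uint8_t port_id)
{
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	/* the RAR index is chosen by the ethdev layer; pool 0 here */
	rte_eth_dev_mac_addr_add(port_id, &mac, 0);
	rte_eth_dev_mac_addr_remove(port_id, &mac);
}
#endif /* IGB_DOC_EXAMPLES */
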
/*
 * Virtual Function operations
 */
static void
igbvf_intr_disable(struct e1000_hw *hw)
{
	PMD_INIT_LOG(DEBUG, "igbvf_intr_disable");

	/* Clear the interrupt mask to stop interrupts from being generated */
	E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);

	E1000_WRITE_FLUSH(hw);
}

static void
igbvf_stop_adapter(struct rte_eth_dev *dev)
{
	u32 reg_val;
	u16 i;
	struct rte_eth_dev_info dev_info;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(&dev_info, 0, sizeof(dev_info));
	eth_igb_infos_get(dev, &dev_info);

	/* Clear the interrupt mask to stop interrupts from being generated */
	igbvf_intr_disable(hw);

	/* Clear any pending interrupts, flush previous writes */
	E1000_READ_REG(hw, E1000_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < dev_info.max_tx_queues; i++)
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < dev_info.max_rx_queues; i++) {
		reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
		reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
		while (E1000_READ_REG(hw, E1000_RXDCTL(i)) &
				E1000_RXDCTL_QUEUE_ENABLE)
			;
	}

	/* flush all queue disables */
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);
}

static int eth_igbvf_link_update(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	struct e1000_mac_info *mac = &hw->mac;
	int ret_val = E1000_SUCCESS;

	PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");

	/*
	 * We only want to run this if a reset has been asserted; in that
	 * case it could mean a link change, a device reset, or a virtual
	 * function reset.
	 */

	/* If we were hit with a reset or timeout, drop the link */
	if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
		mac->get_link_status = TRUE;

	if (!mac->get_link_status)
		goto out;

	/* if the link status is down there is no point in checking whether the pf is up */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
		goto out;

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link */
	mac->get_link_status = FALSE;

out:
	return ret_val;
}

static int
igbvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d\n",
		dev->data->port_id);

	/*
	 * The VF has no ability to enable/disable HW CRC stripping;
	 * keep the behavior consistent with the host PF.
	 */
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
	if (!conf->rxmode.hw_strip_crc) {
		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
		conf->rxmode.hw_strip_crc = 1;
	}
#else
	if (conf->rxmode.hw_strip_crc) {
		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
		conf->rxmode.hw_strip_crc = 0;
	}
#endif

	return 0;
}

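/*
 * Illustrative usage sketch (hypothetical IGB_DOC_EXAMPLES guard): the
 * rxmode checked above comes from the rte_eth_conf passed at configure
 * time. With the default build (RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
 * unset) the VF silently forces hw_strip_crc back to 1.
 */
#ifdef IGB_DOC_EXAMPLES
static int
example_configure_vf_port(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.hw_strip_crc = 1;   /* matches the PF default */

	/* one RX and one TX queue, to be set up separately afterwards */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
#endif /* IGB_DOC_EXAMPLES */
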
static int
igbvf_dev_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	PMD_INIT_LOG(DEBUG, "igbvf_dev_start");

	hw->mac.ops.reset_hw(hw);

	/* Set all vfta */
	igbvf_set_vfta_all(dev, 1);

	eth_igbvf_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igbvf_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	return 0;
}

static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_LOG(DEBUG, "igbvf_dev_stop");

	igbvf_stop_adapter(dev);

	/*
	 * Clear what we set, but keep shadow_vfta so it can be
	 * restored after the device starts again
	 */
	igbvf_set_vfta_all(dev, 0);

	igb_dev_clear_queues(dev);
}

static void
igbvf_dev_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "igbvf_dev_close");

	e1000_reset_hw(hw);

	igbvf_dev_stop(dev);
}

static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	uint32_t msgbuf[2];

	/* After a vlan is set, vlan stripping will also be enabled by the igb driver */
	msgbuf[0] = E1000_VF_SET_VLAN;
	msgbuf[1] = vid;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	if (on)
		msgbuf[0] |= E1000_VF_SET_VLAN_ADD;

	return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
}

static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	int i = 0, j = 0, vfta = 0, mask = 1;

	for (i = 0; i < IGB_VFTA_SIZE; i++) {
		vfta = shadow_vfta->vfta[i];
		if (vfta) {
			mask = 1;
			for (j = 0; j < 32; j++) {
				if (vfta & mask)
					igbvf_set_vfta(hw,
						(uint16_t)((i << 5) + j), on);
				mask <<= 1;
			}
		}
	}
}

static int
igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vid_idx = 0;
	uint32_t vid_bit = 0;
	int ret = 0;

	PMD_INIT_LOG(DEBUG, "igbvf_vlan_filter_set");

	/* vind is not used in the VF driver, set to 0; see ixgbe_set_vfta_vf */
	ret = igbvf_set_vfta(hw, vlan_id, !!on);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
		return ret;
	}
	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));

	/* Save what we set and restore it after device reset */
	if (on)
		shadow_vfta->vfta[vid_idx] |= vid_bit;
	else
		shadow_vfta->vfta[vid_idx] &= ~vid_bit;

	return 0;
}

static int
eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				struct rte_eth_rss_reta *reta_conf)
{
	uint8_t i, j, mask;
	uint32_t reta;
	struct e1000_hw *hw =
			E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Update redirection table RETA[n], n = 0..31. The redirection
	 * table has 128 entries spread across 32 registers.
	 */
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
				(i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
		if (mask != 0) {
			reta = 0;
			/* if all 4 entries are being set, the register
			 * does not need to be read first */
			if (mask != 0xF)
				reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));

			for (j = 0; j < 4; j++) {
				if (mask & (0x1 << j)) {
					if (mask != 0xF)
						reta &= ~(0xFF << 8 * j);
					reta |= reta_conf->reta[i + j] << 8 * j;
				}
			}
			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
		}
	}

	return 0;
}

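/*
 * Illustrative usage sketch (hypothetical IGB_DOC_EXAMPLES guard):
 * spreading the 128 RETA entries alternately across two RX queues.
 * mask_lo/mask_hi select which of the 128 entries are written; here all
 * of them.
 */
#ifdef IGB_DOC_EXAMPLES
static int
example_spread_reta(uint8_t port_id)
{
	struct rte_eth_rss_reta reta_conf;
	uint16_t i;

	memset(&reta_conf, 0, sizeof(reta_conf));
	reta_conf.mask_lo = ~0ULL;      /* update entries 0..63 */
	reta_conf.mask_hi = ~0ULL;      /* update entries 64..127 */
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i++)
		reta_conf.reta[i] = i % 2;      /* alternate queues 0 and 1 */

	return rte_eth_dev_rss_reta_update(port_id, &reta_conf);
}
#endif /* IGB_DOC_EXAMPLES */
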
static int
eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				struct rte_eth_rss_reta *reta_conf)
{
	uint8_t i, j, mask;
	uint32_t reta;
	struct e1000_hw *hw =
			E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Read redirection table RETA[n], n = 0..31. The redirection
	 * table has 128 entries spread across 32 registers.
	 */
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
				(i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);

		if (mask != 0) {
			reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
			for (j = 0; j < 4; j++) {
				if (mask & (0x1 << j))
					reta_conf->reta[i + j] =
						(uint8_t)((reta >> 8 * j) & 0xFF);
			}
		}
	}

	return 0;
}

static struct rte_driver pmd_igb_drv = {
	.type = PMD_PDEV,
	.init = rte_igb_pmd_init,
};

static struct rte_driver pmd_igbvf_drv = {
	.type = PMD_PDEV,
	.init = rte_igbvf_pmd_init,
};

PMD_REGISTER_DRIVER(pmd_igb_drv);
PMD_REGISTER_DRIVER(pmd_igbvf_drv);