net/ngbe: support device promiscuous and allmulticast mode
drivers/net/ngbe/ngbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
                                        uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)

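/*
 * Example: assuming 32-bit bitmap words and NBBY == 8 (bits per byte),
 * queue 35 maps to idx = 35 / 32 = 1 and bit = 35 % 32 = 3, so
 * NGBE_SET_HWSTRIP(h, 35) expands to h->bitmap[1] |= 1 << 3.
 */
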
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_TXD_ALIGN,
        .nb_seg_max = NGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
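/*
 * Each entry pairs an exported xstat name with the member's byte offset
 * inside struct ngbe_hw_stats, so values can be fetched generically via
 * offsetof(); e.g. HW_XSTAT(rx_packets) expands to
 * {"rx_packets", offsetof(struct ngbe_hw_stats, rx_packets)}.
 */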
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
        /* MNG RxTx */
        HW_XSTAT(mng_bmc2host_packets),
        HW_XSTAT(mng_host2bmc_packets),
        /* Basic RxTx */
        HW_XSTAT(rx_packets),
        HW_XSTAT(tx_packets),
        HW_XSTAT(rx_bytes),
        HW_XSTAT(tx_bytes),
        HW_XSTAT(rx_total_bytes),
        HW_XSTAT(rx_total_packets),
        HW_XSTAT(tx_total_packets),
        HW_XSTAT(rx_total_missed_packets),
        HW_XSTAT(rx_broadcast_packets),
        HW_XSTAT(rx_multicast_packets),
        HW_XSTAT(rx_management_packets),
        HW_XSTAT(tx_management_packets),
        HW_XSTAT(rx_management_dropped),

        /* Basic Error */
        HW_XSTAT(rx_crc_errors),
        HW_XSTAT(rx_illegal_byte_errors),
        HW_XSTAT(rx_error_bytes),
        HW_XSTAT(rx_mac_short_packet_dropped),
        HW_XSTAT(rx_length_errors),
        HW_XSTAT(rx_undersize_errors),
        HW_XSTAT(rx_fragment_errors),
        HW_XSTAT(rx_oversize_errors),
        HW_XSTAT(rx_jabber_errors),
        HW_XSTAT(rx_l3_l4_xsum_error),
        HW_XSTAT(mac_local_errors),
        HW_XSTAT(mac_remote_errors),

        /* MACSEC */
        HW_XSTAT(tx_macsec_pkts_untagged),
        HW_XSTAT(tx_macsec_pkts_encrypted),
        HW_XSTAT(tx_macsec_pkts_protected),
        HW_XSTAT(tx_macsec_octets_encrypted),
        HW_XSTAT(tx_macsec_octets_protected),
        HW_XSTAT(rx_macsec_pkts_untagged),
        HW_XSTAT(rx_macsec_pkts_badtag),
        HW_XSTAT(rx_macsec_pkts_nosci),
        HW_XSTAT(rx_macsec_pkts_unknownsci),
        HW_XSTAT(rx_macsec_octets_decrypted),
        HW_XSTAT(rx_macsec_octets_validated),
        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
        HW_XSTAT(rx_macsec_sc_pkts_delayed),
        HW_XSTAT(rx_macsec_sc_pkts_late),
        HW_XSTAT(rx_macsec_sa_pkts_ok),
        HW_XSTAT(rx_macsec_sa_pkts_invalid),
        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

        /* MAC RxTx */
        HW_XSTAT(rx_size_64_packets),
        HW_XSTAT(rx_size_65_to_127_packets),
        HW_XSTAT(rx_size_128_to_255_packets),
        HW_XSTAT(rx_size_256_to_511_packets),
        HW_XSTAT(rx_size_512_to_1023_packets),
        HW_XSTAT(rx_size_1024_to_max_packets),
        HW_XSTAT(tx_size_64_packets),
        HW_XSTAT(tx_size_65_to_127_packets),
        HW_XSTAT(tx_size_128_to_255_packets),
        HW_XSTAT(tx_size_256_to_511_packets),
        HW_XSTAT(tx_size_512_to_1023_packets),
        HW_XSTAT(tx_size_1024_to_max_packets),

        /* Flow Control */
        HW_XSTAT(tx_xon_packets),
        HW_XSTAT(rx_xon_packets),
        HW_XSTAT(tx_xoff_packets),
        HW_XSTAT(rx_xoff_packets),

        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
                           sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
        QP_XSTAT(rx_qp_packets),
        QP_XSTAT(tx_qp_packets),
        QP_XSTAT(rx_qp_bytes),
        QP_XSTAT(tx_qp_bytes),
        QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
                           sizeof(rte_ngbe_qp_strings[0]))
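
/*
 * QP_XSTAT records offsets relative to qp[0]; ngbe_get_offset_by_id()
 * later adds a per-queue stride, which assumes each qp[] entry is exactly
 * NGBE_NB_QP_STATS consecutive uint64_t counters.
 */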

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = hw->mac.reset_hw(hw);

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        if (status == NGBE_ERR_SFP_NOT_PRESENT)
                status = 0;
        return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);

        wr32(hw, NGBE_IENMISC, intr->mask_misc);
        wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
        ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
        ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
        uint16_t mask;

        /*
         * These locks are trickier since they are common to all ports; but
         * swfw_sync retries for long enough (1s) to be almost sure that if
         * the lock cannot be taken, it is due to an improper lock of the
         * semaphore.
         */
        mask = NGBE_MNGSEM_SWPHY |
               NGBE_MNGSEM_SWMBX |
               NGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        int err;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ngbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as the
         * primary process has already done this work. Only check that we
         * don't need a different Rx and Tx function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ngbe_tx_queue *txq;
                /* The Tx queue function in the primary process is set by the
                 * last queue initialized; a Tx queue may not have been
                 * initialized by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        ngbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default Tx function if we get here */
                        PMD_INIT_LOG(NOTICE,
                                "No Tx queues configured yet. Using default Tx function.");
                }

                ngbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->sub_system_id = pci_dev->id.subsystem_device_id;
        ngbe_map_device_id(hw);
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
                NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = ngbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, NULL);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* Reset the hw statistics */
        ngbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ngbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
                        RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                        (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   ngbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        ngbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ngbe_dev_close(eth_dev);

        return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct ngbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (ethdev == NULL)
                return 0;

        return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
        .id_table = pci_id_ngbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_ngbe_pci_probe,
        .remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
        vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}
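
/*
 * Example: assuming the VFTA is 128 32-bit registers covering 4096 VLAN
 * IDs, VLAN 100 selects register idx = (100 >> 5) & 0x7F = 3 and bit
 * 100 & 0x1F = 4, i.e. bit 4 of NGBE_VLANTBL(3).
 */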

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_rx_queue *rxq;
        bool restart;
        uint32_t rxcfg, rxbal, rxbah;

        if (on)
                ngbe_vlan_hw_strip_enable(dev, queue);
        else
                ngbe_vlan_hw_strip_disable(dev, queue);

        rxq = dev->data->rx_queues[queue];
        rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        !(rxcfg & NGBE_RXCFG_VLAN);
                rxcfg |= NGBE_RXCFG_VLAN;
        } else {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        (rxcfg & NGBE_RXCFG_VLAN);
                rxcfg &= ~NGBE_RXCFG_VLAN;
        }
        rxcfg &= ~NGBE_RXCFG_ENA;

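        /*
         * The strip bit can only take effect while the ring is disabled,
         * so ENA is cleared in the saved config; ngbe_dev_rx_queue_start()
         * below is expected to re-enable the ring once the new config has
         * been written back.
         */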
        if (restart) {
                /* set vlan strip for ring */
                ngbe_dev_rx_queue_stop(dev, queue);
                wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
                wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
                ngbe_dev_rx_queue_start(dev, queue);
        }
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type,
                    uint16_t tpid)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        int ret = 0;
        uint32_t portctrl, vlan_ext, qinq;

        portctrl = rd32(hw, NGBE_PORTCTL);

        vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
        switch (vlan_type) {
        case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                } else {
                        ret = -ENOTSUP;
                        PMD_DRV_LOG(ERR,
                                "Inner type is not supported by single VLAN");
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_LSB_MASK,
                                NGBE_TAGTPID_LSB(tpid));
                }
                break;
        case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the upper 16 bits are valid */
                        wr32m(hw, NGBE_EXTAG,
                                NGBE_EXTAG_VLAN_MASK,
                                NGBE_EXTAG_VLAN(tpid));
                } else {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_MSB_MASK,
                                NGBE_TAGTPID_MSB(tpid));
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                return -EINVAL;
        }

        return ret;
}
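
/*
 * Usage sketch (port_id hypothetical): running QinQ with a 0x88A8 service
 * tag would typically be requested as
 *   rte_eth_dev_set_vlan_ether_type(port_id, RTE_ETH_VLAN_TYPE_OUTER,
 *                                   0x88A8);
 * which ethdev routes to this handler via the vlan_tpid_set dev op.
 */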

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vlnctrl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_CFIENA;
        vlnctrl |= NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < NGBE_VFTA_SIZE; i++)
                wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
        struct ngbe_rx_queue *rxq;

        if (queue >= NGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
                NGBE_SET_HWSTRIP(hwstrip, queue);
        else
                NGBE_CLEAR_HWSTRIP(hwstrip, queue);

        if (queue >= dev->data->nb_rx_queues)
                return;

        rxq = dev->data->rx_queues[queue];

        if (on) {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl &= ~NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting for per-queue HW strip */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl |= NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting for per-queue HW strip */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_VLANEXT;
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
        struct ngbe_rx_queue *rxq;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ngbe_vlan_hw_strip_enable(dev, i);
                else
                        ngbe_vlan_hw_strip_disable(dev, i);
        }
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
        uint16_t i;
        struct rte_eth_rxmode *rxmode;
        struct ngbe_rx_queue *rxq;

        if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;

        if (mask & RTE_ETH_VLAN_STRIP_MASK)
                ngbe_vlan_hw_strip_config(dev);

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ngbe_vlan_hw_filter_enable(dev);
                else
                        ngbe_vlan_hw_filter_disable(dev);
        }

        if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        ngbe_vlan_hw_extend_enable(dev);
                else
                        ngbe_vlan_hw_extend_disable(dev);
        }

        if (mask & RTE_ETH_QINQ_STRIP_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
                        ngbe_qinq_hw_strip_enable(dev);
                else
                        ngbe_qinq_hw_strip_disable(dev);
        }

        return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        ngbe_config_vlan_strip_on_all_queues(dev, mask);

        ngbe_vlan_offload_config(dev, mask);

        return 0;
}
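
/*
 * Usage sketch (port_id hypothetical): runtime offload toggles arrive via
 *   rte_eth_dev_set_vlan_offload(port_id,
 *                                RTE_ETH_VLAN_STRIP_OFFLOAD |
 *                                RTE_ETH_VLAN_FILTER_OFFLOAD);
 * ethdev turns the changed bits into the mask handled above.
 */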

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        /* set flag to update link status after init */
        intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue fails to meet the bulk
         * allocation preconditions, the flag will be reset.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

        wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
        wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
        wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
        if (hw->phy.type == ngbe_phy_yt8521s_sfi)
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
        else
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

        intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
        uint32_t *link_speeds;

        PMD_INIT_FUNC_TRACE();

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        /* stop adapter */
        hw->adapter_stopped = 0;
        ngbe_stop_hw(hw);

        /* reinitialize adapter; this calls reset and start */
        hw->nb_rx_queues = dev->data->nb_rx_queues;
        hw->nb_tx_queues = dev->data->nb_tx_queues;
        status = ngbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;

        ngbe_dev_phy_intr_setup(dev);

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle)) {
                if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                                   dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate %d rx_queues intr_vec",
                                     dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure MSI-X for sleep until Rx interrupt */
        ngbe_configure_msix(dev);

        /* initialize transmission unit */
        ngbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ngbe_dev_rx_init(dev);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
                goto error;
        }

        mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
                RTE_ETH_VLAN_EXTEND_MASK;
        err = ngbe_vlan_offload_config(dev, mask);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }

        ngbe_configure_port(dev);

        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }

        err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err != 0)
                goto error;
        dev->data->dev_link.link_status = link_up;

        link_speeds = &dev->data->dev_conf.link_speeds;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
                negotiate = true;

        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
        if (err != 0)
                goto error;

        allowed_speeds = 0;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

        if (*link_speeds & ~allowed_speeds) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }

        speed = 0x0;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = hw->mac.default_speeds;
        } else {
                if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= NGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= NGBE_LINK_SPEED_100M_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
                        speed |= NGBE_LINK_SPEED_10M_FULL;
        }

        hw->phy.init_hw(hw);
        err = hw->mac.setup_link(hw, speed, link_up);
        if (err != 0)
                goto error;

        if (rte_intr_allow_others(intr_handle)) {
                ngbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        ngbe_dev_lsc_interrupt_setup(dev, TRUE);
                else
                        ngbe_dev_lsc_interrupt_setup(dev, FALSE);
                ngbe_dev_macsec_interrupt_setup(dev);
                ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             ngbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO,
                                     "LSC won't enable because of no intr multiplex");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
                ngbe_dev_rxq_interrupt_setup(dev);

        /* enable UIO/VFIO intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* resume enabled intr since HW reset */
        ngbe_enable_intr(dev);

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* gpio0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, 0);
        }

        /*
         * Update link status right before return, because it may
         * start the link configuration process in a separate thread.
         */
        ngbe_dev_link_update(dev, 0);

        ngbe_read_stats_registers(hw, hw_stats);
        hw->offset_loaded = 1;

        return 0;

error:
        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
        ngbe_dev_clear_queues(dev);
        return -EIO;
}
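
/*
 * A minimal start sequence as seen from an application (queue and
 * descriptor counts below are illustrative):
 *   rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL, mb_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL);
 *   rte_eth_dev_start(port_id);  dispatches to ngbe_dev_start() above
 */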

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

        if (hw->adapter_stopped)
                return 0;

        PMD_INIT_FUNC_TRACE();

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* gpio0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
        }

        /* disable interrupts */
        ngbe_disable_intr(hw);

        /* reset the NIC */
        ngbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;

        /* stop adapter */
        ngbe_stop_hw(hw);

        ngbe_dev_clear_queues(dev);

        /* Clear stored conf */
        dev->data->scattered_rx = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* resume to the default handler */
                rte_intr_callback_register(intr_handle,
                                           ngbe_dev_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_intr_vec_list_free(intr_handle);

        hw->adapter_stopped = true;
        dev->data->dev_started = 0;

        return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        ngbe_pf_reset_hw(hw);

        ngbe_dev_stop(dev);

        ngbe_dev_free_queues(dev);

        /* reprogram the RAR[0] in case user changed it. */
        ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                ngbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + NGBE_LINK_UP_TIME));

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        ret = eth_ngbe_dev_uninit(dev);
        if (ret != 0)
                return ret;

        ret = eth_ngbe_dev_init(dev, NULL);

        return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
        {                                                       \
                uint32_t current_counter = rd32(hw, reg);       \
                if (current_counter < last_counter)             \
                        current_counter += 0x100000000LL;       \
                if (!hw->offset_loaded)                         \
                        last_counter = current_counter;         \
                counter = current_counter - last_counter;       \
                counter &= 0xFFFFFFFFLL;                        \
        }

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        {                                                                \
                uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
                uint64_t current_counter_msb = rd32(hw, reg_msb);        \
                uint64_t current_counter = (current_counter_msb << 32) | \
                        current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        current_counter += 0x1000000000LL;               \
                if (!hw->offset_loaded)                                  \
                        last_counter = current_counter;                  \
                counter = current_counter - last_counter;                \
                counter &= 0xFFFFFFFFFLL;                                \
        }
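
/*
 * Both helpers accumulate deltas from free-running hardware counters: if
 * the current read is below the last snapshot the counter wrapped, so one
 * full counter period (2^32 or 2^36) is added before taking the
 * difference. While offset_loaded is clear (right after a stats reset),
 * the snapshot is re-based so the next delta starts from zero.
 */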

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
                           struct ngbe_hw_stats *hw_stats)
{
        unsigned int i;

        /* QP Stats */
        for (i = 0; i < hw->nb_rx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
                        hw->qp_last[i].rx_qp_packets,
                        hw_stats->qp[i].rx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
                        hw->qp_last[i].rx_qp_bytes,
                        hw_stats->qp[i].rx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
                        hw->qp_last[i].rx_qp_mc_packets,
                        hw_stats->qp[i].rx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
                        hw->qp_last[i].rx_qp_bc_packets,
                        hw_stats->qp[i].rx_qp_bc_packets);
        }

        for (i = 0; i < hw->nb_tx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
                        hw->qp_last[i].tx_qp_packets,
                        hw_stats->qp[i].tx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
                        hw->qp_last[i].tx_qp_bytes,
                        hw_stats->qp[i].tx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
                        hw->qp_last[i].tx_qp_mc_packets,
                        hw_stats->qp[i].tx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
                        hw->qp_last[i].tx_qp_bc_packets,
                        hw_stats->qp[i].tx_qp_bc_packets);
        }

        /* PB Stats */
        hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
        hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
        hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
        hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
        hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
        hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

        hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
        hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

        /* DMA Stats */
        hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
        hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
        hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
        hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
        hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
        hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

        /* MAC Stats */
        hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
        hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
        hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

        hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
        hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
        hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

        hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
        hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

        hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
        hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
        hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
        hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
        hw_stats->rx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACRX512TO1023L);
        hw_stats->rx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACRX1024TOMAXL);
        hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
        hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
        hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
        hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
        hw_stats->tx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACTX512TO1023L);
        hw_stats->tx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACTX1024TOMAXL);

        hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
        hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
        hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

        /* MNG Stats */
        hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
        hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
        hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
        hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

        /* MACsec Stats */
        hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
        hw_stats->tx_macsec_pkts_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCPKT);
        hw_stats->tx_macsec_pkts_protected +=
                        rd32(hw, NGBE_LSECTX_PROTPKT);
        hw_stats->tx_macsec_octets_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCOCT);
        hw_stats->tx_macsec_octets_protected +=
                        rd32(hw, NGBE_LSECTX_PROTOCT);
        hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
        hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
        hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
        hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
        hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
        hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
        hw_stats->rx_macsec_sc_pkts_unchecked +=
                        rd32(hw, NGBE_LSECRX_UNCHKPKT);
        hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
        hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
        for (i = 0; i < 2; i++) {
                hw_stats->rx_macsec_sa_pkts_ok +=
                        rd32(hw, NGBE_LSECRX_OKPKT(i));
                hw_stats->rx_macsec_sa_pkts_invalid +=
                        rd32(hw, NGBE_LSECRX_INVPKT(i));
                hw_stats->rx_macsec_sa_pkts_notvalid +=
                        rd32(hw, NGBE_LSECRX_BADPKT(i));
        }
        for (i = 0; i < 4; i++) {
                hw_stats->rx_macsec_sa_pkts_unusedsa +=
                        rd32(hw, NGBE_LSECRX_INVSAPKT(i));
                hw_stats->rx_macsec_sa_pkts_notusingsa +=
                        rd32(hw, NGBE_LSECRX_BADSAPKT(i));
        }
        hw_stats->rx_total_missed_packets =
                        hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct ngbe_stat_mappings *stat_mappings =
                        NGBE_DEV_STAT_MAPPINGS(dev);
        uint32_t i, j;

        ngbe_read_stats_registers(hw, hw_stats);

        if (stats == NULL)
                return -EINVAL;

        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = hw_stats->rx_packets;
        stats->ibytes = hw_stats->rx_bytes;
        stats->opackets = hw_stats->tx_packets;
        stats->obytes = hw_stats->tx_bytes;

        memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
        memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
        memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
        memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
        memset(&stats->q_errors, 0, sizeof(stats->q_errors));
        for (i = 0; i < NGBE_MAX_QP; i++) {
                uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
                uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
                uint32_t q_map;

                q_map = (stat_mappings->rqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
                stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

                q_map = (stat_mappings->tqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
                stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
        }

        /* Rx Errors */
        stats->imissed  = hw_stats->rx_total_missed_packets +
                          hw_stats->rx_dma_drop;
        stats->ierrors  = hw_stats->rx_crc_errors +
                          hw_stats->rx_mac_short_packet_dropped +
                          hw_stats->rx_length_errors +
                          hw_stats->rx_undersize_errors +
                          hw_stats->rx_oversize_errors +
                          hw_stats->rx_illegal_byte_errors +
                          hw_stats->rx_error_bytes +
                          hw_stats->rx_fragment_errors;

        /* Tx Errors */
        stats->oerrors  = 0;
        return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

        /* HW registers are cleared on read */
        hw->offset_loaded = 0;
        ngbe_dev_stats_get(dev, NULL);
        hw->offset_loaded = 1;

        /* Reset software totals */
        memset(hw_stats, 0, sizeof(*hw_stats));

        return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
        int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
        return NGBE_NB_HW_STATS +
               NGBE_NB_QP_STATS * nb_queues;
}

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
        int nb, st;

        /* Extended stats from ngbe_hw_stats */
        if (id < NGBE_NB_HW_STATS) {
                snprintf(name, size, "[hw]%s",
                        rte_ngbe_stats_strings[id].name);
                return 0;
        }
        id -= NGBE_NB_HW_STATS;

        /* Queue Stats */
        if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
                nb = id / NGBE_NB_QP_STATS;
                st = id % NGBE_NB_QP_STATS;
                snprintf(name, size, "[q%u]%s", nb,
                        rte_ngbe_qp_strings[st].name);
                return 0;
        }
        id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

        return -(int)(id + 1);
}
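
/*
 * Example: with NGBE_NB_QP_STATS == 5, id = NGBE_NB_HW_STATS + 7 falls in
 * the queue range and decodes to queue nb = 7 / 5 = 1 and stat st = 7 % 5
 * = 2, i.e. the name "[q1]rx_qp_bytes".
 */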

static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
        int nb, st;

        /* Extended stats from ngbe_hw_stats */
        if (id < NGBE_NB_HW_STATS) {
                *offset = rte_ngbe_stats_strings[id].offset;
                return 0;
        }
        id -= NGBE_NB_HW_STATS;

        /* Queue Stats */
        if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
                nb = id / NGBE_NB_QP_STATS;
                st = id % NGBE_NB_QP_STATS;
                *offset = rte_ngbe_qp_strings[st].offset +
                        nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
                return 0;
        }

        return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
        unsigned int i, count;

        count = ngbe_xstats_calc_num(dev);
        if (xstats_names == NULL)
                return count;

1453         /* Note: limit >= cnt_stats checked upstream
1454          * in rte_eth_xstats_get_names()
1455          */
1456         limit = min(limit, count);
1457
1458         /* Extended stats from ngbe_hw_stats */
1459         for (i = 0; i < limit; i++) {
1460                 if (ngbe_get_name_by_id(i, xstats_names[i].name,
1461                         sizeof(xstats_names[i].name))) {
1462                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1463                         break;
1464                 }
1465         }
1466
1467         return i;
1468 }
1469
1470 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1471         const uint64_t *ids,
1472         struct rte_eth_xstat_name *xstats_names,
1473         unsigned int limit)
1474 {
1475         unsigned int i;
1476
1477         if (ids == NULL)
1478                 return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1479
1480         for (i = 0; i < limit; i++) {
1481                 if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1482                                 sizeof(xstats_names[i].name))) {
1483                         PMD_INIT_LOG(WARNING, "id value %" PRIu64 " isn't valid", ids[i]);
1484                         return -1;
1485                 }
1486         }
1487
1488         return i;
1489 }
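
/*
 * Usage sketch (not part of the driver): enumerating every extended
 * statistic of a port via the generic API, which dispatches to the
 * callbacks above; `port_id` is an assumed application variable.
 *
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *   #include <inttypes.h>
 *   #include <rte_ethdev.h>
 *
 *   static void example_dump_xstats(uint16_t port_id)
 *   {
 *           int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *           struct rte_eth_xstat_name *names;
 *           struct rte_eth_xstat *vals;
 *
 *           if (n <= 0)
 *                   return;
 *           names = calloc(n, sizeof(*names));
 *           vals = calloc(n, sizeof(*vals));
 *           if (names != NULL && vals != NULL &&
 *               rte_eth_xstats_get_names(port_id, names, n) == n &&
 *               rte_eth_xstats_get(port_id, vals, n) == n) {
 *                   for (i = 0; i < n; i++)
 *                           printf("%s: %" PRIu64 "\n",
 *                                  names[i].name, vals[i].value);
 *           }
 *           free(names);
 *           free(vals);
 *   }
 */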
1490
1491 static int
1492 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1493                                          unsigned int limit)
1494 {
1495         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1496         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1497         unsigned int i, count;
1498
1499         ngbe_read_stats_registers(hw, hw_stats);
1500
1501         /* If this is a reset, xstats is NULL, and we have cleared the
1502          * registers by reading them.
1503          */
1504         count = ngbe_xstats_calc_num(dev);
1505         if (xstats == NULL)
1506                 return count;
1507
1508         limit = min(limit, ngbe_xstats_calc_num(dev));
1509
1510         /* Extended stats from ngbe_hw_stats */
1511         for (i = 0; i < limit; i++) {
1512                 uint32_t offset = 0;
1513
1514                 if (ngbe_get_offset_by_id(i, &offset)) {
1515                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1516                         break;
1517                 }
1518                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1519                 xstats[i].id = i;
1520         }
1521
1522         return i;
1523 }
1524
1525 static int
1526 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1527                                          unsigned int limit)
1528 {
1529         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1530         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1531         unsigned int i, count;
1532
1533         ngbe_read_stats_registers(hw, hw_stats);
1534
1535         /* If this is a reset, xstats is NULL, and we have cleared the
1536          * registers by reading them.
1537          */
1538         count = ngbe_xstats_calc_num(dev);
1539         if (values == NULL)
1540                 return count;
1541
1542         limit = min(limit, ngbe_xstats_calc_num(dev));
1543
1544         /* Extended stats from ngbe_hw_stats */
1545         for (i = 0; i < limit; i++) {
1546                 uint32_t offset;
1547
1548                 if (ngbe_get_offset_by_id(i, &offset)) {
1549                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1550                         break;
1551                 }
1552                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1553         }
1554
1555         return i;
1556 }
1557
1558 static int
1559 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1560                 uint64_t *values, unsigned int limit)
1561 {
1562         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1563         unsigned int i;
1564
1565         if (ids == NULL)
1566                 return ngbe_dev_xstats_get_(dev, values, limit);
1567
1568         for (i = 0; i < limit; i++) {
1569                 uint32_t offset;
1570
1571                 if (ngbe_get_offset_by_id(ids[i], &offset)) {
1572                         PMD_INIT_LOG(WARNING, "id value %" PRIu64 " isn't valid", ids[i]);
1573                         break;
1574                 }
1575                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1576         }
1577
1578         return i;
1579 }
1580
1581 static int
1582 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1583 {
1584         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1585         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1586
1587         /* HW registers are cleared on read */
1588         hw->offset_loaded = 0;
1589         ngbe_read_stats_registers(hw, hw_stats);
1590         hw->offset_loaded = 1;
1591
1592         /* Reset software totals */
1593         memset(hw_stats, 0, sizeof(*hw_stats));
1594
1595         return 0;
1596 }
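
/*
 * Usage sketch (not part of the driver): a single generic call lands here.
 * Because the hardware counters are read-on-clear, one throwaway read with
 * offset accumulation disabled is all the reset needs.
 *
 *   rte_eth_xstats_reset(port_id);  // dispatches to ngbe_dev_xstats_reset()
 */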
1597
1598 static int
1599 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1600 {
1601         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1602
1603         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1604         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1605         dev_info->min_rx_bufsize = 1024;
1606         dev_info->max_rx_pktlen = 15872;
1607         dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1608         dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1609                                      dev_info->rx_queue_offload_capa);
1610         dev_info->tx_queue_offload_capa = 0;
1611         dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1612
1613         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1614                 .rx_thresh = {
1615                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1616                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1617                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1618                 },
1619                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1620                 .rx_drop_en = 0,
1621                 .offloads = 0,
1622         };
1623
1624         dev_info->default_txconf = (struct rte_eth_txconf) {
1625                 .tx_thresh = {
1626                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1627                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1628                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1629                 },
1630                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1631                 .offloads = 0,
1632         };
1633
1634         dev_info->rx_desc_lim = rx_desc_lim;
1635         dev_info->tx_desc_lim = tx_desc_lim;
1636
1637         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1638                                 RTE_ETH_LINK_SPEED_10M;
1639
1640         /* Driver-preferred Rx/Tx parameters */
1641         dev_info->default_rxportconf.burst_size = 32;
1642         dev_info->default_txportconf.burst_size = 32;
1643         dev_info->default_rxportconf.nb_queues = 1;
1644         dev_info->default_txportconf.nb_queues = 1;
1645         dev_info->default_rxportconf.ring_size = 256;
1646         dev_info->default_txportconf.ring_size = 256;
1647
1648         return 0;
1649 }
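
/*
 * Usage sketch (not part of the driver): querying these limits before queue
 * setup and letting ethdev clamp the requested ring sizes against
 * rx_desc_lim/tx_desc_lim; `port_id` is an assumed application variable.
 *
 *   #include <rte_ethdev.h>
 *
 *   static int example_pick_ring_sizes(uint16_t port_id)
 *   {
 *           struct rte_eth_dev_info info;
 *           uint16_t nb_rxd = 512, nb_txd = 512;
 *
 *           if (rte_eth_dev_info_get(port_id, &info) != 0)
 *                   return -1;
 *           // Rounds the requested sizes to the driver's limits/alignment
 *           return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *   }
 */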
1650
1651 const uint32_t *
1652 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1653 {
1654         if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1655             dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1656             dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1657             dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1658                 return ngbe_get_supported_ptypes();
1659
1660         return NULL;
1661 }
1662
1663 /* Return 0 if the link status changed, -1 if it did not change */
1664 int
1665 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1666                             int wait_to_complete)
1667 {
1668         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1669         struct rte_eth_link link;
1670         u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1671         u32 lan_speed = 0;
1672         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1673         bool link_up;
1674         int err;
1675         int wait = 1;
1676
1677         memset(&link, 0, sizeof(link));
1678         link.link_status = RTE_ETH_LINK_DOWN;
1679         link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1680         link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1681         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1682                         ~RTE_ETH_LINK_SPEED_AUTONEG);
1683
1684         hw->mac.get_link_status = true;
1685
1686         if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1687                 return rte_eth_linkstatus_set(dev, &link);
1688
1689         /* Don't wait for completion if polling is requested or the LSC interrupt is enabled */
1690         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1691                 wait = 0;
1692
1693         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1694         if (err != 0) {
1695                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1696                 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1697                 return rte_eth_linkstatus_set(dev, &link);
1698         }
1699
1700         if (!link_up)
1701                 return rte_eth_linkstatus_set(dev, &link);
1702
1703         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1704         link.link_status = RTE_ETH_LINK_UP;
1705         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1706
1707         switch (link_speed) {
1708         default:
1709         case NGBE_LINK_SPEED_UNKNOWN:
1710                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1711                 break;
1712
1713         case NGBE_LINK_SPEED_10M_FULL:
1714                 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1715                 lan_speed = 0;
1716                 break;
1717
1718         case NGBE_LINK_SPEED_100M_FULL:
1719                 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1720                 lan_speed = 1;
1721                 break;
1722
1723         case NGBE_LINK_SPEED_1GB_FULL:
1724                 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1725                 lan_speed = 2;
1726                 break;
1727         }
1728
1729         if (hw->is_pf) {
1730                 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1731                 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1732                                 NGBE_LINK_SPEED_100M_FULL |
1733                                 NGBE_LINK_SPEED_10M_FULL)) {
1734                         wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1735                                 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1736                 }
1737         }
1738
1739         return rte_eth_linkstatus_set(dev, &link);
1740 }
1741
1742 static int
1743 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1744 {
1745         return ngbe_dev_link_update_share(dev, wait_to_complete);
1746 }
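
/*
 * Usage sketch (not part of the driver): polling the link without blocking;
 * rte_eth_link_get_nowait() reaches this callback with wait_to_complete == 0.
 * `port_id` is an assumed application variable.
 *
 *   #include <stdio.h>
 *   #include <rte_ethdev.h>
 *
 *   static void example_poll_link(uint16_t port_id)
 *   {
 *           struct rte_eth_link link;
 *           char buf[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *           if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *                   rte_eth_link_to_str(buf, sizeof(buf), &link);
 *                   printf("Port %u: %s\n", port_id, buf);
 *           }
 *   }
 */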
1747
1748 static int
1749 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1750 {
1751         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1752         uint32_t fctrl;
1753
1754         fctrl = rd32(hw, NGBE_PSRCTL);
1755         fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1756         wr32(hw, NGBE_PSRCTL, fctrl);
1757
1758         return 0;
1759 }
1760
1761 static int
1762 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1763 {
1764         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1765         uint32_t fctrl;
1766
1767         fctrl = rd32(hw, NGBE_PSRCTL);
1768         fctrl &= (~NGBE_PSRCTL_UCP);
1769         if (dev->data->all_multicast == 1)
1770                 fctrl |= NGBE_PSRCTL_MCP;
1771         else
1772                 fctrl &= (~NGBE_PSRCTL_MCP);
1773         wr32(hw, NGBE_PSRCTL, fctrl);
1774
1775         return 0;
1776 }
1777
1778 static int
1779 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1780 {
1781         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1782         uint32_t fctrl;
1783
1784         fctrl = rd32(hw, NGBE_PSRCTL);
1785         fctrl |= NGBE_PSRCTL_MCP;
1786         wr32(hw, NGBE_PSRCTL, fctrl);
1787
1788         return 0;
1789 }
1790
1791 static int
1792 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1793 {
1794         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1795         uint32_t fctrl;
1796
1797         if (dev->data->promiscuous == 1)
1798                 return 0; /* must remain in all_multicast mode */
1799
1800         fctrl = rd32(hw, NGBE_PSRCTL);
1801         fctrl &= (~NGBE_PSRCTL_MCP);
1802         wr32(hw, NGBE_PSRCTL, fctrl);
1803
1804         return 0;
1805 }
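
/*
 * Usage sketch (not part of the driver): toggling the filtering modes from
 * an application. Note how the callbacks above interact: leaving promiscuous
 * mode keeps MCP set while all_multicast is on, and allmulticast cannot be
 * disabled while promiscuous mode is active.
 *
 *   rte_eth_promiscuous_enable(port_id);    // sets UCP and MCP
 *   rte_eth_promiscuous_disable(port_id);   // clears UCP; MCP per allmulti
 *   rte_eth_allmulticast_enable(port_id);   // sets MCP only
 *   rte_eth_allmulticast_disable(port_id);  // no-op while promiscuous
 */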
1806
1807 /**
1808  * It clears the interrupt causes and enables the interrupt.
1809  * It will be called only once during NIC initialization.
1810  *
1811  * @param dev
1812  *  Pointer to struct rte_eth_dev.
1813  * @param on
1814  *  Enable or Disable.
1815  *
1816  * @return
1817  *  - On success, zero.
1818  *  - On failure, a negative value.
1819  */
1820 static int
1821 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1822 {
1823         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1824
1825         ngbe_dev_link_status_print(dev);
1826         if (on != 0) {
1827                 intr->mask_misc |= NGBE_ICRMISC_PHY;
1828                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1829         } else {
1830                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1831                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1832         }
1833
1834         return 0;
1835 }
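
/*
 * Usage sketch (not part of the driver): consuming the LSC events this mask
 * enables. The application sets dev_conf.intr_conf.lsc = 1 and registers a
 * callback; `example_lsc_cb` is a hypothetical name.
 *
 *   #include <stdio.h>
 *   #include <rte_ethdev.h>
 *
 *   static int
 *   example_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                  void *cb_arg, void *ret_param)
 *   {
 *           RTE_SET_USED(event);
 *           RTE_SET_USED(cb_arg);
 *           RTE_SET_USED(ret_param);
 *           printf("link state changed on port %u\n", port_id);
 *           return 0;
 *   }
 *
 *   // rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *   //                               example_lsc_cb, NULL);
 */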
1836
1837 /**
1838  * It clears the interrupt causes and enables the interrupt.
1839  * It will be called only once during NIC initialization.
1840  *
1841  * @param dev
1842  *  Pointer to struct rte_eth_dev.
1843  *
1844  * @return
1845  *  - On success, zero.
1846  *  - On failure, a negative value.
1847  */
1848 static int
1849 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1850 {
1851         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1852         u64 mask;
1853
1854         mask = NGBE_ICR_MASK;
1855         mask &= (1ULL << NGBE_MISC_VEC_ID);
1856         intr->mask |= mask;
1857         intr->mask_misc |= NGBE_ICRMISC_GPIO;
1858
1859         return 0;
1860 }
1861
1862 /**
1863  * It clears the interrupt causes and enables the interrupt.
1864  * It will be called only once during NIC initialization.
1865  *
1866  * @param dev
1867  *  Pointer to struct rte_eth_dev.
1868  *
1869  * @return
1870  *  - On success, zero.
1871  *  - On failure, a negative value.
1872  */
1873 static int
1874 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1875 {
1876         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1877         u64 mask;
1878
1879         mask = NGBE_ICR_MASK;
1880         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
1881         intr->mask |= mask;
1882
1883         return 0;
1884 }
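
/*
 * Usage sketch (not part of the driver): the queue vectors unmasked here back
 * rte_eth_dev_rx_intr_enable()/_disable(), the building blocks of an
 * interrupt-then-poll receive loop; `port_id`/`queue_id` are assumed.
 *
 *   rte_eth_dev_rx_intr_enable(port_id, queue_id);   // arm, then sleep on fd
 *   rte_eth_dev_rx_intr_disable(port_id, queue_id);  // mask again and poll
 */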
1885
1886 /**
1887  * It clears the interrupt causes and enables the interrupt.
1888  * It will be called only once during NIC initialization.
1889  *
1890  * @param dev
1891  *  Pointer to struct rte_eth_dev.
1892  *
1893  * @return
1894  *  - On success, zero.
1895  *  - On failure, a negative value.
1896  */
1897 static int
1898 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1899 {
1900         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1901
1902         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
1903
1904         return 0;
1905 }
1906
1907 /**
1908  * It reads the ICR and sets a flag for the link update.
1909  *
1910  * @param dev
1911  *  Pointer to struct rte_eth_dev.
1912  *
1913  * @return
1914  *  - On success, zero.
1915  *  - On failure, a negative value.
1916  */
1917 static int
1918 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1919 {
1920         uint32_t eicr;
1921         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1922         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1923
1924         /* clear all cause mask */
1925         ngbe_disable_intr(hw);
1926
1927         /* read-on-clear nic registers here */
1928         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1929         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1930
1931         intr->flags = 0;
1932
1933         /* set flag for async link update */
1934         if (eicr & NGBE_ICRMISC_PHY)
1935                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1936
1937         if (eicr & NGBE_ICRMISC_VFMBX)
1938                 intr->flags |= NGBE_FLAG_MAILBOX;
1939
1940         if (eicr & NGBE_ICRMISC_LNKSEC)
1941                 intr->flags |= NGBE_FLAG_MACSEC;
1942
1943         if (eicr & NGBE_ICRMISC_GPIO)
1944                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1945
1946         return 0;
1947 }
1948
1949 /**
1950  * It gets and then prints the link status.
1951  *
1952  * @param dev
1953  *  Pointer to struct rte_eth_dev.
1958  */
1959 static void
1960 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
1961 {
1962         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1963         struct rte_eth_link link;
1964
1965         rte_eth_linkstatus_get(dev, &link);
1966
1967         if (link.link_status == RTE_ETH_LINK_UP) {
1968                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1969                                         (int)(dev->data->port_id),
1970                                         (unsigned int)link.link_speed,
1971                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1972                                         "full-duplex" : "half-duplex");
1973         } else {
1974                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
1975                                 (int)(dev->data->port_id));
1976         }
1977         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1978                                 pci_dev->addr.domain,
1979                                 pci_dev->addr.bus,
1980                                 pci_dev->addr.devid,
1981                                 pci_dev->addr.function);
1982 }
1983
1984 /**
1985  * It executes link_update after an interrupt has occurred.
1986  *
1987  * @param dev
1988  *  Pointer to struct rte_eth_dev.
1989  *
1990  * @return
1991  *  - On success, zero.
1992  *  - On failure, a negative value.
1993  */
1994 static int
1995 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
1996 {
1997         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1998         int64_t timeout;
1999
2000         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2001
2002         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2003                 struct rte_eth_link link;
2004
2005                 /* Get the link status before the update, to predict the transition below */
2006                 rte_eth_linkstatus_get(dev, &link);
2007
2008                 ngbe_dev_link_update(dev, 0);
2009
2010                 /* link was down: likely coming up */
2011                 if (link.link_status != RTE_ETH_LINK_UP)
2012                         /* handle it 1 sec later, waiting for it to stabilize */
2013                         timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
2014                 /* link was up: likely going down */
2015                 else
2016                         /* handle it 4 sec later, waiting for it to stabilize */
2017                         timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
2018
2019                 ngbe_dev_link_status_print(dev);
2020                 if (rte_eal_alarm_set(timeout * 1000,
2021                                       ngbe_dev_interrupt_delayed_handler,
2022                                       (void *)dev) < 0) {
2023                         PMD_DRV_LOG(ERR, "Error setting alarm");
2024                 } else {
2025                         /* remember original mask */
2026                         intr->mask_misc_orig = intr->mask_misc;
2027                         /* only disable lsc interrupt */
2028                         intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2029
2030                         intr->mask_orig = intr->mask;
2031                         /* only disable all misc interrupts */
2032                         intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
2033                 }
2034         }
2035
2036         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2037         ngbe_enable_intr(dev);
2038
2039         return 0;
2040 }
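
/*
 * Minimal sketch of the deferred-work pattern used above, with the same EAL
 * API: schedule a one-shot callback N microseconds ahead, or cancel it when
 * no longer wanted; `example_cb` and `arg` are hypothetical.
 *
 *   #include <rte_alarm.h>
 *
 *   static void example_cb(void *arg) { RTE_SET_USED(arg); }
 *
 *   rte_eal_alarm_set(1000 * 1000, example_cb, arg);  // fire in 1 second
 *   rte_eal_alarm_cancel(example_cb, arg);            // drop pending alarms
 */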
2041
2042 /**
2043  * Interrupt handler registered as an alarm callback for delayed handling
2044  * of a specific interrupt, waiting for the NIC state to become stable. As
2045  * the ngbe interrupt state is not stable right after the link goes down,
2046  * the driver waits 4 seconds before reading the final status.
2047  *
2048  * @param param
2049  *  The address of parameter (struct rte_eth_dev *) registered before.
2050  */
2051 static void
2052 ngbe_dev_interrupt_delayed_handler(void *param)
2053 {
2054         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2055         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2056         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2057         uint32_t eicr;
2058
2059         ngbe_disable_intr(hw);
2060
2061         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2062
2063         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2064                 ngbe_dev_link_update(dev, 0);
2065                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2066                 ngbe_dev_link_status_print(dev);
2067                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2068                                               NULL);
2069         }
2070
2071         if (intr->flags & NGBE_FLAG_MACSEC) {
2072                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2073                                               NULL);
2074                 intr->flags &= ~NGBE_FLAG_MACSEC;
2075         }
2076
2077         /* restore original mask */
2078         intr->mask_misc = intr->mask_misc_orig;
2079         intr->mask_misc_orig = 0;
2080         intr->mask = intr->mask_orig;
2081         intr->mask_orig = 0;
2082
2083         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2084         ngbe_enable_intr(dev);
2085 }
2086
2087 /**
2088  * Interrupt handler triggered by the NIC for handling a
2089  * specific interrupt.
2090  *
2091  * @param param
2092  *  The address of parameter (struct rte_eth_dev *) registered before.
2093  */
2094 static void
2095 ngbe_dev_interrupt_handler(void *param)
2096 {
2097         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2098
2099         ngbe_dev_interrupt_get_status(dev);
2100         ngbe_dev_interrupt_action(dev);
2101 }
2102
2103 static int
2104 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2105 {
2106         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2107         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + NGBE_VLAN_TAG_SIZE;
2108         struct rte_eth_dev_data *dev_data = dev->data;
2109
2110         /* If device is started, refuse mtu that requires the support of
2111          * scattered packets when this feature has not been enabled before.
2112          */
2113         if (dev_data->dev_started && !dev_data->scattered_rx &&
2114             (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
2115              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2116                 PMD_INIT_LOG(ERR, "Stop port first.");
2117                 return -EINVAL;
2118         }
2119
2120         if (hw->mode)
2121                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2122                         NGBE_FRAME_SIZE_MAX);
2123         else
2124                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2125                         NGBE_FRMSZ_MAX(frame_size));
2126
2127         return 0;
2128 }
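
/*
 * Usage sketch (not part of the driver): setting the MTU from an application.
 * With the arithmetic above, MTU 1500 programs a frame size of
 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN) = 1522 bytes.
 *
 *   if (rte_eth_dev_set_mtu(port_id, 1500) != 0)
 *           printf("MTU rejected; port may need to be stopped first\n");
 */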
2129
2130 /**
2131  * Set the IVAR registers, mapping interrupt causes to vectors
2132  * @param hw
2133  *  pointer to ngbe_hw struct
2134  * @param direction
2135  *  0 for Rx, 1 for Tx, -1 for other causes
2136  * @param queue
2137  *  queue to map the corresponding interrupt to
2138  * @param msix_vector
2139  *  the vector to map to the corresponding queue
2140  */
2141 void
2142 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2143                    uint8_t queue, uint8_t msix_vector)
2144 {
2145         uint32_t tmp, idx;
2146
2147         if (direction == -1) {
2148                 /* other causes */
2149                 msix_vector |= NGBE_IVARMISC_VLD;
2150                 idx = 0;
2151                 tmp = rd32(hw, NGBE_IVARMISC);
2152                 tmp &= ~(0xFF << idx);
2153                 tmp |= (msix_vector << idx);
2154                 wr32(hw, NGBE_IVARMISC, tmp);
2155         } else {
2156                 /* rx or tx causes */
2157                 /* Workaround for lost ICR */
2158                 idx = ((16 * (queue & 1)) + (8 * direction));
2159                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2160                 tmp &= ~(0xFF << idx);
2161                 tmp |= (msix_vector << idx);
2162                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2163         }
2164 }
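
/*
 * Worked example of the IVAR indexing above: each 32-bit IVAR register packs
 * four 8-bit entries, two queues x {Rx, Tx}. For Rx queue 3 (direction 0):
 *
 *   idx = 16 * (3 & 1) + 8 * 0 = 16   ->  bits 23:16
 *   reg = NGBE_IVAR(3 >> 1)           ->  NGBE_IVAR(1)
 *
 * For Tx queue 3 (direction 1): idx = 16 + 8 = 24, in the same register.
 */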
2165
2166 /**
2167  * Sets up the hardware to properly generate MSI-X interrupts
2168  * @hw
2169  *  board private structure
2170  */
2171 static void
2172 ngbe_configure_msix(struct rte_eth_dev *dev)
2173 {
2174         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2175         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2176         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2177         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2178         uint32_t vec = NGBE_MISC_VEC_ID;
2179         uint32_t gpie;
2180
2181         /*
2182          * Don't configure the MSI-X registers if no mapping has been done
2183          * between interrupt vectors and event fds; but if MSI-X has already
2184          * been enabled, auto clear, auto mask and throttling still need to
2185          * be configured.
2186          */
2187         gpie = rd32(hw, NGBE_GPIE);
2188         if (!rte_intr_dp_is_en(intr_handle) &&
2189             !(gpie & NGBE_GPIE_MSIX))
2190                 return;
2191
2192         if (rte_intr_allow_others(intr_handle)) {
2193                 base = NGBE_RX_VEC_START;
2194                 vec = base;
2195         }
2196
2197         /* setup GPIE for MSI-X mode */
2198         gpie = rd32(hw, NGBE_GPIE);
2199         gpie |= NGBE_GPIE_MSIX;
2200         wr32(hw, NGBE_GPIE, gpie);
2201
2202         /* Populate the IVAR table and set the ITR values to the
2203          * corresponding register.
2204          */
2205         if (rte_intr_dp_is_en(intr_handle)) {
2206                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2207                         queue_id++) {
2208                         /* by default, 1:1 mapping */
2209                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
2210                         rte_intr_vec_list_index_set(intr_handle,
2211                                                            queue_id, vec);
2212                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
2213                             - 1)
2214                                 vec++;
2215                 }
2216
2217                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2218         }
2219         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2220                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2221                         | NGBE_ITR_WRDSA);
2222 }
2223
2224 static const struct eth_dev_ops ngbe_eth_dev_ops = {
2225         .dev_configure              = ngbe_dev_configure,
2226         .dev_infos_get              = ngbe_dev_info_get,
2227         .dev_start                  = ngbe_dev_start,
2228         .dev_stop                   = ngbe_dev_stop,
2229         .dev_close                  = ngbe_dev_close,
2230         .dev_reset                  = ngbe_dev_reset,
2231         .promiscuous_enable         = ngbe_dev_promiscuous_enable,
2232         .promiscuous_disable        = ngbe_dev_promiscuous_disable,
2233         .allmulticast_enable        = ngbe_dev_allmulticast_enable,
2234         .allmulticast_disable       = ngbe_dev_allmulticast_disable,
2235         .link_update                = ngbe_dev_link_update,
2236         .stats_get                  = ngbe_dev_stats_get,
2237         .xstats_get                 = ngbe_dev_xstats_get,
2238         .xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
2239         .stats_reset                = ngbe_dev_stats_reset,
2240         .xstats_reset               = ngbe_dev_xstats_reset,
2241         .xstats_get_names           = ngbe_dev_xstats_get_names,
2242         .xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
2243         .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
2244         .mtu_set                    = ngbe_dev_mtu_set,
2245         .vlan_filter_set            = ngbe_vlan_filter_set,
2246         .vlan_tpid_set              = ngbe_vlan_tpid_set,
2247         .vlan_offload_set           = ngbe_vlan_offload_set,
2248         .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
2249         .rx_queue_start             = ngbe_dev_rx_queue_start,
2250         .rx_queue_stop              = ngbe_dev_rx_queue_stop,
2251         .tx_queue_start             = ngbe_dev_tx_queue_start,
2252         .tx_queue_stop              = ngbe_dev_tx_queue_stop,
2253         .rx_queue_setup             = ngbe_dev_rx_queue_setup,
2254         .rx_queue_release           = ngbe_dev_rx_queue_release,
2255         .tx_queue_setup             = ngbe_dev_tx_queue_setup,
2256         .tx_queue_release           = ngbe_dev_tx_queue_release,
2257         .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
2258         .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
2259 };
2260
2261 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
2262 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
2263 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
2264
2265 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
2266 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
2267
2268 #ifdef RTE_ETHDEV_DEBUG_RX
2269         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
2270 #endif
2271 #ifdef RTE_ETHDEV_DEBUG_TX
2272         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
2273 #endif