net/ngbe: support FW version query
drivers/net/ngbe/ngbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
                                        uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
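
/*
 * Worked example (illustrative only, not used by the driver): with
 * 32-bit bitmap words (sizeof((h)->bitmap[0]) * NBBY == 32), queue 35
 * maps to idx = 35 / 32 = 1 and bit = 35 % 32 = 3, so:
 *
 *     struct ngbe_hwstrip hwstrip = { 0 };
 *     uint32_t r;
 *
 *     NGBE_SET_HWSTRIP(&hwstrip, 35);      // bitmap[1] |= 1 << 3
 *     NGBE_GET_HWSTRIP(&hwstrip, 35, r);   // r == 1
 *     NGBE_CLEAR_HWSTRIP(&hwstrip, 35);    // bitmap[1] &= ~(1 << 3)
 *     NGBE_GET_HWSTRIP(&hwstrip, 35, r);   // r == 0
 */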

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_TXD_ALIGN,
        .nb_seg_max = NGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
        /* MNG RxTx */
        HW_XSTAT(mng_bmc2host_packets),
        HW_XSTAT(mng_host2bmc_packets),
        /* Basic RxTx */
        HW_XSTAT(rx_packets),
        HW_XSTAT(tx_packets),
        HW_XSTAT(rx_bytes),
        HW_XSTAT(tx_bytes),
        HW_XSTAT(rx_total_bytes),
        HW_XSTAT(rx_total_packets),
        HW_XSTAT(tx_total_packets),
        HW_XSTAT(rx_total_missed_packets),
        HW_XSTAT(rx_broadcast_packets),
        HW_XSTAT(rx_multicast_packets),
        HW_XSTAT(rx_management_packets),
        HW_XSTAT(tx_management_packets),
        HW_XSTAT(rx_management_dropped),

        /* Basic Error */
        HW_XSTAT(rx_crc_errors),
        HW_XSTAT(rx_illegal_byte_errors),
        HW_XSTAT(rx_error_bytes),
        HW_XSTAT(rx_mac_short_packet_dropped),
        HW_XSTAT(rx_length_errors),
        HW_XSTAT(rx_undersize_errors),
        HW_XSTAT(rx_fragment_errors),
        HW_XSTAT(rx_oversize_errors),
        HW_XSTAT(rx_jabber_errors),
        HW_XSTAT(rx_l3_l4_xsum_error),
        HW_XSTAT(mac_local_errors),
        HW_XSTAT(mac_remote_errors),

        /* MACSEC */
        HW_XSTAT(tx_macsec_pkts_untagged),
        HW_XSTAT(tx_macsec_pkts_encrypted),
        HW_XSTAT(tx_macsec_pkts_protected),
        HW_XSTAT(tx_macsec_octets_encrypted),
        HW_XSTAT(tx_macsec_octets_protected),
        HW_XSTAT(rx_macsec_pkts_untagged),
        HW_XSTAT(rx_macsec_pkts_badtag),
        HW_XSTAT(rx_macsec_pkts_nosci),
        HW_XSTAT(rx_macsec_pkts_unknownsci),
        HW_XSTAT(rx_macsec_octets_decrypted),
        HW_XSTAT(rx_macsec_octets_validated),
        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
        HW_XSTAT(rx_macsec_sc_pkts_delayed),
        HW_XSTAT(rx_macsec_sc_pkts_late),
        HW_XSTAT(rx_macsec_sa_pkts_ok),
        HW_XSTAT(rx_macsec_sa_pkts_invalid),
        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

        /* MAC RxTx */
        HW_XSTAT(rx_size_64_packets),
        HW_XSTAT(rx_size_65_to_127_packets),
        HW_XSTAT(rx_size_128_to_255_packets),
        HW_XSTAT(rx_size_256_to_511_packets),
        HW_XSTAT(rx_size_512_to_1023_packets),
        HW_XSTAT(rx_size_1024_to_max_packets),
        HW_XSTAT(tx_size_64_packets),
        HW_XSTAT(tx_size_65_to_127_packets),
        HW_XSTAT(tx_size_128_to_255_packets),
        HW_XSTAT(tx_size_256_to_511_packets),
        HW_XSTAT(tx_size_512_to_1023_packets),
        HW_XSTAT(tx_size_1024_to_max_packets),

        /* Flow Control */
        HW_XSTAT(tx_xon_packets),
        HW_XSTAT(rx_xon_packets),
        HW_XSTAT(tx_xoff_packets),
        HW_XSTAT(rx_xoff_packets),

        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
                           sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
        QP_XSTAT(rx_qp_packets),
        QP_XSTAT(tx_qp_packets),
        QP_XSTAT(rx_qp_bytes),
        QP_XSTAT(tx_qp_bytes),
        QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
                           sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = hw->mac.reset_hw(hw);

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        if (status == NGBE_ERR_SFP_NOT_PRESENT)
                status = 0;
        return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);

        wr32(hw, NGBE_IENMISC, intr->mask_misc);
        wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
        ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
        ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
        uint16_t mask;

        /*
         * These are trickier since they are common to all ports; but
         * swfw_sync retries for long enough (1s) to be almost sure that if
         * the lock cannot be taken it is due to an improper lock of the
         * semaphore.
         */
        mask = NGBE_MNGSEM_SWPHY |
               NGBE_MNGSEM_SWMBX |
               NGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        int err;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ngbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as the
         * primary has already done this work. Only check that we don't need
         * a different Rx and Tx function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ngbe_tx_queue *txq;
                /* The Tx queue function in the primary process is set by the
                 * last queue initialized; the Tx queue may not have been
                 * initialized by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        ngbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default Tx function if we get here */
                        PMD_INIT_LOG(NOTICE,
                                "No Tx queues configured yet. Using default Tx function.");
                }

                ngbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->sub_system_id = pci_dev->id.subsystem_device_id;
        ngbe_map_device_id(hw);
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
                NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = ngbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, NULL);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* Reset the hw statistics */
        ngbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ngbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
                        RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                        (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   ngbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        ngbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ngbe_dev_close(eth_dev);

        return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct ngbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (ethdev == NULL)
                return 0;

        return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
        .id_table = pci_id_ngbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_ngbe_pci_probe,
        .remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
        vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}
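
/*
 * Index arithmetic, worked (illustrative): the 4096-entry VLAN filter
 * table is kept as 128 32-bit words, so for vlan_id = 100:
 *     vid_idx = (100 >> 5) & 0x7F = 3
 *     vid_bit = 1 << (100 & 0x1F) = 1 << 4
 * i.e. enabling VLAN 100 sets bit 4 of NGBE_VLANTBL(3); the same bit
 * is mirrored in shadow_vfta for replay when filtering is re-enabled.
 */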

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_rx_queue *rxq;
        bool restart;
        uint32_t rxcfg, rxbal, rxbah;

        if (on)
                ngbe_vlan_hw_strip_enable(dev, queue);
        else
                ngbe_vlan_hw_strip_disable(dev, queue);

        rxq = dev->data->rx_queues[queue];
        rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        !(rxcfg & NGBE_RXCFG_VLAN);
                rxcfg |= NGBE_RXCFG_VLAN;
        } else {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        (rxcfg & NGBE_RXCFG_VLAN);
                rxcfg &= ~NGBE_RXCFG_VLAN;
        }
        rxcfg &= ~NGBE_RXCFG_ENA;

        if (restart) {
                /* set vlan strip for ring */
                ngbe_dev_rx_queue_stop(dev, queue);
                wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
                wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
                ngbe_dev_rx_queue_start(dev, queue);
        }
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type,
                    uint16_t tpid)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        int ret = 0;
        uint32_t portctrl, vlan_ext, qinq;

        portctrl = rd32(hw, NGBE_PORTCTL);

        vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
        switch (vlan_type) {
        case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                } else {
                        ret = -ENOTSUP;
                        PMD_DRV_LOG(ERR,
                                "Inner type is not supported by single VLAN");
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_LSB_MASK,
                                NGBE_TAGTPID_LSB(tpid));
                }
                break;
        case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16 bits are valid */
                        wr32m(hw, NGBE_EXTAG,
                                NGBE_EXTAG_VLAN_MASK,
                                NGBE_EXTAG_VLAN(tpid));
                } else {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_MSB_MASK,
                                NGBE_TAGTPID_MSB(tpid));
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                return -EINVAL;
        }

        return ret;
}
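
/*
 * Application-side sketch (illustrative; assumes an initialised port,
 * not part of this driver): the handler above is reached through the
 * generic ethdev API, e.g. to set the outer TPID to 0x88A8:
 *
 *     ret = rte_eth_dev_set_vlan_ether_type(port_id,
 *                     RTE_ETH_VLAN_TYPE_OUTER, 0x88A8);
 *     if (ret != 0)
 *             // handle error, e.g. -ENOTSUP without VLAN extend
 */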

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vlnctrl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_CFIENA;
        vlnctrl |= NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < NGBE_VFTA_SIZE; i++)
                wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
        struct ngbe_rx_queue *rxq;

        if (queue >= NGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
                NGBE_SET_HWSTRIP(hwstrip, queue);
        else
                NGBE_CLEAR_HWSTRIP(hwstrip, queue);

        if (queue >= dev->data->nb_rx_queues)
                return;

        rxq = dev->data->rx_queues[queue];

        if (on) {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl &= ~NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting for HW strip per queue */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl |= NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting for HW strip per queue */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_VLANEXT;
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
        struct ngbe_rx_queue *rxq;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ngbe_vlan_hw_strip_enable(dev, i);
                else
                        ngbe_vlan_hw_strip_disable(dev, i);
        }
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
        uint16_t i;
        struct rte_eth_rxmode *rxmode;
        struct ngbe_rx_queue *rxq;

        if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;

        if (mask & RTE_ETH_VLAN_STRIP_MASK)
                ngbe_vlan_hw_strip_config(dev);

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ngbe_vlan_hw_filter_enable(dev);
                else
                        ngbe_vlan_hw_filter_disable(dev);
        }

        if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        ngbe_vlan_hw_extend_enable(dev);
                else
                        ngbe_vlan_hw_extend_disable(dev);
        }

        if (mask & RTE_ETH_QINQ_STRIP_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
                        ngbe_qinq_hw_strip_enable(dev);
                else
                        ngbe_qinq_hw_strip_disable(dev);
        }

        return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        ngbe_config_vlan_strip_on_all_queues(dev, mask);

        ngbe_vlan_offload_config(dev, mask);

        return 0;
}
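
/*
 * Usage sketch (illustrative, application side): this handler backs
 * rte_eth_dev_set_vlan_offload(); the ethdev layer turns the requested
 * RTE_ETH_VLAN_*_OFFLOAD bits into the RTE_ETH_VLAN_*_MASK bits that
 * ngbe_vlan_offload_config() checks above:
 *
 *     int offload = RTE_ETH_VLAN_STRIP_OFFLOAD |
 *                   RTE_ETH_VLAN_FILTER_OFFLOAD;
 *     ret = rte_eth_dev_set_vlan_offload(port_id, offload);
 */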

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        /* set flag to update link status after init */
        intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any of the Rx queues does not meet the
         * bulk allocation preconditions, it will be reset.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

        wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
        wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
        wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
        if (hw->phy.type == ngbe_phy_yt8521s_sfi)
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
        else
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

        intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
        uint32_t *link_speeds;

        PMD_INIT_FUNC_TRACE();

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        /* stop adapter */
        hw->adapter_stopped = 0;
        ngbe_stop_hw(hw);

        /* reinitialize adapter, this calls reset and start */
        hw->nb_rx_queues = dev->data->nb_rx_queues;
        hw->nb_tx_queues = dev->data->nb_tx_queues;
        status = ngbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;

        ngbe_dev_phy_intr_setup(dev);

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle)) {
                if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                                   dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate %d rx_queues intr_vec",
                                     dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure MSI-X for sleep until Rx interrupt */
        ngbe_configure_msix(dev);

        /* initialize transmission unit */
        ngbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ngbe_dev_rx_init(dev);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
                goto error;
        }

        mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
                RTE_ETH_VLAN_EXTEND_MASK;
        err = ngbe_vlan_offload_config(dev, mask);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }

        ngbe_configure_port(dev);

        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }

        err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err != 0)
                goto error;
        dev->data->dev_link.link_status = link_up;

        link_speeds = &dev->data->dev_conf.link_speeds;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
                negotiate = true;

        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
        if (err != 0)
                goto error;

        allowed_speeds = 0;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

        if (*link_speeds & ~allowed_speeds) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }

        speed = 0x0;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = hw->mac.default_speeds;
        } else {
                if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= NGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= NGBE_LINK_SPEED_100M_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
                        speed |= NGBE_LINK_SPEED_10M_FULL;
        }

        hw->phy.init_hw(hw);
        err = hw->mac.setup_link(hw, speed, link_up);
        if (err != 0)
                goto error;

        if (rte_intr_allow_others(intr_handle)) {
                ngbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        ngbe_dev_lsc_interrupt_setup(dev, TRUE);
                else
                        ngbe_dev_lsc_interrupt_setup(dev, FALSE);
                ngbe_dev_macsec_interrupt_setup(dev);
                ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             ngbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO,
                                     "LSC won't enable because of no intr multiplex");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
                ngbe_dev_rxq_interrupt_setup(dev);

        /* enable UIO/VFIO intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* resume enabled intr since HW reset */
        ngbe_enable_intr(dev);

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* gpio0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, 0);
        }

        /*
         * Update link status right before return, because it may
         * start the link configuration process in a separate thread.
         */
        ngbe_dev_link_update(dev, 0);

        ngbe_read_stats_registers(hw, hw_stats);
        hw->offset_loaded = 1;

        return 0;

error:
        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
        ngbe_dev_clear_queues(dev);
        return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

        if (hw->adapter_stopped)
                return 0;

        PMD_INIT_FUNC_TRACE();

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* gpio0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
        }

        /* disable interrupts */
        ngbe_disable_intr(hw);

        /* reset the NIC */
        ngbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;

        /* stop adapter */
        ngbe_stop_hw(hw);

        ngbe_dev_clear_queues(dev);

        /* Clear stored conf */
        dev->data->scattered_rx = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* resume to the default handler */
                rte_intr_callback_register(intr_handle,
                                           ngbe_dev_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_intr_vec_list_free(intr_handle);

        hw->adapter_stopped = true;
        dev->data->dev_started = 0;

        return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        ngbe_pf_reset_hw(hw);

        ngbe_dev_stop(dev);

        ngbe_dev_free_queues(dev);

        /* reprogram RAR[0] in case the user changed it. */
        ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                ngbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + NGBE_LINK_UP_TIME));

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        ret = eth_ngbe_dev_uninit(dev);
        if (ret != 0)
                return ret;

        ret = eth_ngbe_dev_init(dev, NULL);

        return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
        {                                                       \
                uint32_t current_counter = rd32(hw, reg);       \
                if (current_counter < last_counter)             \
                        current_counter += 0x100000000LL;       \
                if (!hw->offset_loaded)                         \
                        last_counter = current_counter;         \
                counter = current_counter - last_counter;       \
                counter &= 0xFFFFFFFFLL;                        \
        }

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        {                                                                \
                uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
                uint64_t current_counter_msb = rd32(hw, reg_msb);        \
                uint64_t current_counter = (current_counter_msb << 32) | \
                        current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        current_counter += 0x1000000000LL;               \
                if (!hw->offset_loaded)                                  \
                        last_counter = current_counter;                  \
                counter = current_counter - last_counter;                \
                counter &= 0xFFFFFFFFFLL;                                \
        }
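
/*
 * Wrap handling, worked (illustrative): the QP counters wrap at 2^32
 * (2^36 for the split LSB/MSB byte counters). If the last snapshot was
 * 0xFFFFFFF0 and the register now reads 0x10, current < last, so 2^32
 * is added before the subtraction:
 *     (0x100000010 - 0xFFFFFFF0) & 0xFFFFFFFF = 0x20
 * i.e. 32 events elapsed across the wrap.
 */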

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
                           struct ngbe_hw_stats *hw_stats)
{
        unsigned int i;

        /* QP Stats */
        for (i = 0; i < hw->nb_rx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
                        hw->qp_last[i].rx_qp_packets,
                        hw_stats->qp[i].rx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
                        hw->qp_last[i].rx_qp_bytes,
                        hw_stats->qp[i].rx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
                        hw->qp_last[i].rx_qp_mc_packets,
                        hw_stats->qp[i].rx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
                        hw->qp_last[i].rx_qp_bc_packets,
                        hw_stats->qp[i].rx_qp_bc_packets);
        }

        for (i = 0; i < hw->nb_tx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
                        hw->qp_last[i].tx_qp_packets,
                        hw_stats->qp[i].tx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
                        hw->qp_last[i].tx_qp_bytes,
                        hw_stats->qp[i].tx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
                        hw->qp_last[i].tx_qp_mc_packets,
                        hw_stats->qp[i].tx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
                        hw->qp_last[i].tx_qp_bc_packets,
                        hw_stats->qp[i].tx_qp_bc_packets);
        }

        /* PB Stats */
        hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
        hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
        hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
        hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
        hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
        hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

        hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
        hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

        /* DMA Stats */
        hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
        hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
        hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
        hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
        hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
        hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

        /* MAC Stats */
        hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
        hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
        hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

        hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
        hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
        hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

        hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
        hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

        hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
        hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
        hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
        hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
        hw_stats->rx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACRX512TO1023L);
        hw_stats->rx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACRX1024TOMAXL);
        hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
        hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
        hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
        hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
        hw_stats->tx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACTX512TO1023L);
        hw_stats->tx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACTX1024TOMAXL);

        hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
        hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
        hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

        /* MNG Stats */
        hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
        hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
        hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
        hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

        /* MACsec Stats */
        hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
        hw_stats->tx_macsec_pkts_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCPKT);
        hw_stats->tx_macsec_pkts_protected +=
                        rd32(hw, NGBE_LSECTX_PROTPKT);
        hw_stats->tx_macsec_octets_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCOCT);
        hw_stats->tx_macsec_octets_protected +=
                        rd32(hw, NGBE_LSECTX_PROTOCT);
        hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
        hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
        hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
        hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
        hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
        hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
        hw_stats->rx_macsec_sc_pkts_unchecked +=
                        rd32(hw, NGBE_LSECRX_UNCHKPKT);
        hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
        hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
        for (i = 0; i < 2; i++) {
                hw_stats->rx_macsec_sa_pkts_ok +=
                        rd32(hw, NGBE_LSECRX_OKPKT(i));
                hw_stats->rx_macsec_sa_pkts_invalid +=
                        rd32(hw, NGBE_LSECRX_INVPKT(i));
                hw_stats->rx_macsec_sa_pkts_notvalid +=
                        rd32(hw, NGBE_LSECRX_BADPKT(i));
        }
        for (i = 0; i < 4; i++) {
                hw_stats->rx_macsec_sa_pkts_unusedsa +=
                        rd32(hw, NGBE_LSECRX_INVSAPKT(i));
                hw_stats->rx_macsec_sa_pkts_notusingsa +=
                        rd32(hw, NGBE_LSECRX_BADSAPKT(i));
        }
        hw_stats->rx_total_missed_packets =
                        hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct ngbe_stat_mappings *stat_mappings =
                        NGBE_DEV_STAT_MAPPINGS(dev);
        uint32_t i, j;

        ngbe_read_stats_registers(hw, hw_stats);

        if (stats == NULL)
                return -EINVAL;

        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = hw_stats->rx_packets;
        stats->ibytes = hw_stats->rx_bytes;
        stats->opackets = hw_stats->tx_packets;
        stats->obytes = hw_stats->tx_bytes;

        memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
        memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
        memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
        memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
        memset(&stats->q_errors, 0, sizeof(stats->q_errors));
        for (i = 0; i < NGBE_MAX_QP; i++) {
                uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
                uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
                uint32_t q_map;

                q_map = (stat_mappings->rqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
                stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

                q_map = (stat_mappings->tqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
                stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
        }

        /* Rx Errors */
        stats->imissed  = hw_stats->rx_total_missed_packets +
                          hw_stats->rx_dma_drop;
        stats->ierrors  = hw_stats->rx_crc_errors +
                          hw_stats->rx_mac_short_packet_dropped +
                          hw_stats->rx_length_errors +
                          hw_stats->rx_undersize_errors +
                          hw_stats->rx_oversize_errors +
                          hw_stats->rx_illegal_byte_errors +
                          hw_stats->rx_error_bytes +
                          hw_stats->rx_fragment_errors;

        /* Tx Errors */
        stats->oerrors  = 0;
        return 0;
}
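
/*
 * Usage sketch (illustrative, application side; needs <inttypes.h> for
 * PRIu64): these totals surface through the generic stats API, e.g.:
 *
 *     struct rte_eth_stats st;
 *
 *     if (rte_eth_stats_get(port_id, &st) == 0)
 *             printf("rx %" PRIu64 " pkts, %" PRIu64 " missed\n",
 *                    st.ipackets, st.imissed);
 */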

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

        /* HW registers are cleared on read */
        hw->offset_loaded = 0;
        ngbe_dev_stats_get(dev, NULL);
        hw->offset_loaded = 1;

        /* Reset software totals */
        memset(hw_stats, 0, sizeof(*hw_stats));

        return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
        int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
        return NGBE_NB_HW_STATS +
               NGBE_NB_QP_STATS * nb_queues;
}
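
/*
 * Worked example (illustrative): with 4 Rx queues and 2 Tx queues
 * configured, nb_queues = max(4, 2) = 4, so the port exposes
 * NGBE_NB_HW_STATS + NGBE_NB_QP_STATS * 4 extended statistics.
 */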

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
        int nb, st;

        /* Extended stats from ngbe_hw_stats */
        if (id < NGBE_NB_HW_STATS) {
                snprintf(name, size, "[hw]%s",
                        rte_ngbe_stats_strings[id].name);
                return 0;
        }
        id -= NGBE_NB_HW_STATS;

        /* Queue Stats */
        if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
                nb = id / NGBE_NB_QP_STATS;
                st = id % NGBE_NB_QP_STATS;
                snprintf(name, size, "[q%u]%s", nb,
                        rte_ngbe_qp_strings[st].name);
                return 0;
        }
        id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

        return -(int)(id + 1);
}
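
/*
 * Name layout (illustrative): ids are dense, hardware stats first,
 * then per-queue stats grouped by queue:
 *     id 0                                   -> "[hw]mng_bmc2host_packets"
 *     id NGBE_NB_HW_STATS                    -> "[q0]rx_qp_packets"
 *     id NGBE_NB_HW_STATS + NGBE_NB_QP_STATS -> "[q1]rx_qp_packets"
 */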

static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
        int nb, st;

        /* Extended stats from ngbe_hw_stats */
        if (id < NGBE_NB_HW_STATS) {
                *offset = rte_ngbe_stats_strings[id].offset;
                return 0;
        }
        id -= NGBE_NB_HW_STATS;

        /* Queue Stats */
        if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
                nb = id / NGBE_NB_QP_STATS;
                st = id % NGBE_NB_QP_STATS;
                *offset = rte_ngbe_qp_strings[st].offset +
                        nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
                return 0;
        }

        return -1;
}
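
/*
 * Offset arithmetic, worked (illustrative; assumes each queue's
 * counters are laid out as NGBE_NB_QP_STATS contiguous 64-bit fields,
 * as the stride above implies): for "[q2]tx_qp_bytes", nb = 2 and
 * st = 3, so
 *     *offset = offsetof(struct ngbe_hw_stats, qp[0].tx_qp_bytes)
 *               + 2 * (NGBE_NB_QP_STATS * sizeof(uint64_t));
 */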
1443
1444 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1445         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1446 {
1447         unsigned int i, count;
1448
1449         count = ngbe_xstats_calc_num(dev);
1450         if (xstats_names == NULL)
1451                 return count;
1452
1453         /* Note: limit >= cnt_stats checked upstream
1454          * in rte_eth_xstats_get_names()
1455          */
1456         limit = min(limit, count);
1457
1458         /* Extended stats from ngbe_hw_stats */
1459         for (i = 0; i < limit; i++) {
1460                 if (ngbe_get_name_by_id(i, xstats_names[i].name,
1461                         sizeof(xstats_names[i].name))) {
1462                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1463                         break;
1464                 }
1465         }
1466
1467         return i;
1468 }
1469
1470 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1471         const uint64_t *ids,
1472         struct rte_eth_xstat_name *xstats_names,
1473         unsigned int limit)
1474 {
1475         unsigned int i;
1476
1477         if (ids == NULL)
1478                 return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1479
1480         for (i = 0; i < limit; i++) {
1481                 if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1482                                 sizeof(xstats_names[i].name))) {
1483                         PMD_INIT_LOG(WARNING, "ids[%u] isn't valid", i);
1484                         return -1;
1485                 }
1486         }
1487
1488         return i;
1489 }
1490
1491 static int
1492 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1493                                          unsigned int limit)
1494 {
1495         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1496         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1497         unsigned int i, count;
1498
1499         ngbe_read_stats_registers(hw, hw_stats);
1500
1501         /* If this is a reset, xstats is NULL and we have already cleared
1502          * the registers by reading them.
1503          */
1504         count = ngbe_xstats_calc_num(dev);
1505         if (xstats == NULL)
1506                 return count;
1507
1508         limit = min(limit, count);
1509
1510         /* Extended stats from ngbe_hw_stats */
1511         for (i = 0; i < limit; i++) {
1512                 uint32_t offset = 0;
1513
1514                 if (ngbe_get_offset_by_id(i, &offset)) {
1515                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1516                         break;
1517                 }
1518                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1519                 xstats[i].id = i;
1520         }
1521
1522         return i;
1523 }
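
/*
 * Applications reach the handler above through the generic ethdev call,
 * e.g. (a sketch, error handling omitted):
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);  // query the count
 *     struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *     rte_eth_xstats_get(port_id, xs, n);
 *     free(xs);
 */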
1524
1525 static int
1526 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1527                                          unsigned int limit)
1528 {
1529         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1530         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1531         unsigned int i, count;
1532
1533         ngbe_read_stats_registers(hw, hw_stats);
1534
1535         /* If this is a reset, values is NULL and we have already cleared
1536          * the registers by reading them.
1537          */
1538         count = ngbe_xstats_calc_num(dev);
1539         if (values == NULL)
1540                 return count;
1541
1542         limit = min(limit, count);
1543
1544         /* Extended stats from ngbe_hw_stats */
1545         for (i = 0; i < limit; i++) {
1546                 uint32_t offset;
1547
1548                 if (ngbe_get_offset_by_id(i, &offset)) {
1549                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1550                         break;
1551                 }
1552                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1553         }
1554
1555         return i;
1556 }
1557
1558 static int
1559 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1560                 uint64_t *values, unsigned int limit)
1561 {
1562         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1563         unsigned int i;
1564
1565         if (ids == NULL)
1566                 return ngbe_dev_xstats_get_(dev, values, limit);
1567
1568         for (i = 0; i < limit; i++) {
1569                 uint32_t offset;
1570
1571                 if (ngbe_get_offset_by_id(ids[i], &offset)) {
1572                         PMD_INIT_LOG(WARNING, "ids[%u] isn't valid", i);
1573                         break;
1574                 }
1575                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1576         }
1577
1578         return i;
1579 }
1580
1581 static int
1582 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1583 {
1584         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1585         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1586
1587         /* HW registers are cleared on read */
1588         hw->offset_loaded = 0;
1589         ngbe_read_stats_registers(hw, hw_stats);
1590         hw->offset_loaded = 1;
1591
1592         /* Reset software totals */
1593         memset(hw_stats, 0, sizeof(*hw_stats));
1594
1595         return 0;
1596 }
1597
1598 static int
1599 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1600 {
1601         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1602         int ret;
1603
1604         ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1605
1606         if (ret < 0)
1607                 return -EINVAL;
1608
1609         ret += 1; /* account for the terminating '\0' */
1610         if (fw_size < (size_t)ret)
1611                 return ret;
1612
1613         return 0;
1614 }
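
/*
 * The callback above reports the EEPROM id as the firmware version and,
 * following the ethdev convention, returns the required buffer size
 * (including the trailing '\0') when the caller's buffer is too small.
 * Typical application usage (a sketch):
 *
 *     char fw[32];
 *     if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *         printf("FW version: %s\n", fw);
 */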
1615
1616 static int
1617 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1618 {
1619         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1620
1621         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1622         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1623         dev_info->min_rx_bufsize = 1024;
1624         dev_info->max_rx_pktlen = 15872;
1625         dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1626         dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1627                                      dev_info->rx_queue_offload_capa);
1628         dev_info->tx_queue_offload_capa = 0;
1629         dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1630
1631         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1632                 .rx_thresh = {
1633                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1634                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1635                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1636                 },
1637                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1638                 .rx_drop_en = 0,
1639                 .offloads = 0,
1640         };
1641
1642         dev_info->default_txconf = (struct rte_eth_txconf) {
1643                 .tx_thresh = {
1644                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1645                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1646                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1647                 },
1648                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1649                 .offloads = 0,
1650         };
1651
1652         dev_info->rx_desc_lim = rx_desc_lim;
1653         dev_info->tx_desc_lim = tx_desc_lim;
1654
1655         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1656                                 RTE_ETH_LINK_SPEED_10M;
1657
1658         /* Driver-preferred Rx/Tx parameters */
1659         dev_info->default_rxportconf.burst_size = 32;
1660         dev_info->default_txportconf.burst_size = 32;
1661         dev_info->default_rxportconf.nb_queues = 1;
1662         dev_info->default_txportconf.nb_queues = 1;
1663         dev_info->default_rxportconf.ring_size = 256;
1664         dev_info->default_txportconf.ring_size = 256;
1665
1666         return 0;
1667 }
1668
1669 const uint32_t *
1670 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1671 {
1672         if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1673             dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1674             dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1675             dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1676                 return ngbe_get_supported_ptypes();
1677
1678         return NULL;
1679 }
1680
1681 /* Return 0 if the link status changed, -1 if it did not change */
1682 int
1683 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1684                             int wait_to_complete)
1685 {
1686         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1687         struct rte_eth_link link;
1688         u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1689         u32 lan_speed = 0;
1690         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1691         bool link_up;
1692         int err;
1693         int wait = 1;
1694
1695         memset(&link, 0, sizeof(link));
1696         link.link_status = RTE_ETH_LINK_DOWN;
1697         link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1698         link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1699         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1700                         ~RTE_ETH_LINK_SPEED_AUTONEG);
1701
1702         hw->mac.get_link_status = true;
1703
1704         if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1705                 return rte_eth_linkstatus_set(dev, &link);
1706
1707         /* don't wait for completion if it isn't requested or the LSC interrupt is enabled */
1708         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1709                 wait = 0;
1710
1711         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1712         if (err != 0) {
1713                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1714                 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1715                 return rte_eth_linkstatus_set(dev, &link);
1716         }
1717
1718         if (!link_up)
1719                 return rte_eth_linkstatus_set(dev, &link);
1720
1721         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1722         link.link_status = RTE_ETH_LINK_UP;
1723         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1724
1725         switch (link_speed) {
1726         default:
1727         case NGBE_LINK_SPEED_UNKNOWN:
1728                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1729                 break;
1730
1731         case NGBE_LINK_SPEED_10M_FULL:
1732                 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1733                 lan_speed = 0;
1734                 break;
1735
1736         case NGBE_LINK_SPEED_100M_FULL:
1737                 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1738                 lan_speed = 1;
1739                 break;
1740
1741         case NGBE_LINK_SPEED_1GB_FULL:
1742                 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1743                 lan_speed = 2;
1744                 break;
1745         }
1746
1747         if (hw->is_pf) {
1748                 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1749                 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1750                                 NGBE_LINK_SPEED_100M_FULL |
1751                                 NGBE_LINK_SPEED_10M_FULL)) {
1752                         wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1753                                 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1754                 }
1755         }
1756
1757         return rte_eth_linkstatus_set(dev, &link);
1758 }
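
/*
 * Applications usually poll the link through the generic API, which lands
 * in the handler above, e.g. (a sketch):
 *
 *     struct rte_eth_link link;
 *     rte_eth_link_get_nowait(port_id, &link);  // wait_to_complete == 0
 *     // or rte_eth_link_get(port_id, &link)    // wait_to_complete == 1
 */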
1759
1760 static int
1761 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1762 {
1763         return ngbe_dev_link_update_share(dev, wait_to_complete);
1764 }
1765
1766 static int
1767 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1768 {
1769         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1770         uint32_t fctrl;
1771
1772         fctrl = rd32(hw, NGBE_PSRCTL);
1773         fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1774         wr32(hw, NGBE_PSRCTL, fctrl);
1775
1776         return 0;
1777 }
1778
1779 static int
1780 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1781 {
1782         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1783         uint32_t fctrl;
1784
1785         fctrl = rd32(hw, NGBE_PSRCTL);
1786         fctrl &= (~NGBE_PSRCTL_UCP);
1787         if (dev->data->all_multicast == 1)
1788                 fctrl |= NGBE_PSRCTL_MCP;
1789         else
1790                 fctrl &= (~NGBE_PSRCTL_MCP);
1791         wr32(hw, NGBE_PSRCTL, fctrl);
1792
1793         return 0;
1794 }
1795
1796 static int
1797 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1798 {
1799         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1800         uint32_t fctrl;
1801
1802         fctrl = rd32(hw, NGBE_PSRCTL);
1803         fctrl |= NGBE_PSRCTL_MCP;
1804         wr32(hw, NGBE_PSRCTL, fctrl);
1805
1806         return 0;
1807 }
1808
1809 static int
1810 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1811 {
1812         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1813         uint32_t fctrl;
1814
1815         if (dev->data->promiscuous == 1)
1816                 return 0; /* must remain in all_multicast mode */
1817
1818         fctrl = rd32(hw, NGBE_PSRCTL);
1819         fctrl &= (~NGBE_PSRCTL_MCP);
1820         wr32(hw, NGBE_PSRCTL, fctrl);
1821
1822         return 0;
1823 }
1824
1825 /**
1826  * Enable or disable the link status change (LSC) interrupt mask bits.
1827  * It will be called only once during NIC initialization.
1828  *
1829  * @param dev
1830  *  Pointer to struct rte_eth_dev.
1831  * @param on
1832  *  Enable or Disable.
1833  *
1834  * @return
1835  *  - On success, zero.
1836  *  - On failure, a negative value.
1837  */
1838 static int
1839 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1840 {
1841         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1842
1843         ngbe_dev_link_status_print(dev);
1844         if (on != 0) {
1845                 intr->mask_misc |= NGBE_ICRMISC_PHY;
1846                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1847         } else {
1848                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1849                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1850         }
1851
1852         return 0;
1853 }
1854
1855 /**
1856  * It sets up the misc interrupt vector mask.
1857  * It will be called only once during NIC initialization.
1858  *
1859  * @param dev
1860  *  Pointer to struct rte_eth_dev.
1861  *
1862  * @return
1863  *  - On success, zero.
1864  *  - On failure, a negative value.
1865  */
1866 static int
1867 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1868 {
1869         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1870         u64 mask;
1871
1872         mask = NGBE_ICR_MASK;
1873         mask &= (1ULL << NGBE_MISC_VEC_ID);
1874         intr->mask |= mask;
1875         intr->mask_misc |= NGBE_ICRMISC_GPIO;
1876
1877         return 0;
1878 }
1879
1880 /**
1881  * It sets up the Rx queue interrupt vector mask.
1882  * It will be called only once during NIC initialization.
1883  *
1884  * @param dev
1885  *  Pointer to struct rte_eth_dev.
1886  *
1887  * @return
1888  *  - On success, zero.
1889  *  - On failure, a negative value.
1890  */
1891 static int
1892 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1893 {
1894         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1895         u64 mask;
1896
1897         mask = NGBE_ICR_MASK;
1898         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
1899         intr->mask |= mask;
1900
1901         return 0;
1902 }
1903
1904 /**
1905  * It enables the MACsec interrupt in the misc interrupt mask.
1906  * It will be called only once during NIC initialization.
1907  *
1908  * @param dev
1909  *  Pointer to struct rte_eth_dev.
1910  *
1911  * @return
1912  *  - On success, zero.
1913  *  - On failure, a negative value.
1914  */
1915 static int
1916 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1917 {
1918         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1919
1920         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
1921
1922         return 0;
1923 }
1924
1925 /**
1926  * It reads the ICR and sets flags for the link_update.
1927  *
1928  * @param dev
1929  *  Pointer to struct rte_eth_dev.
1930  *
1931  * @return
1932  *  - On success, zero.
1933  *  - On failure, a negative value.
1934  */
1935 static int
1936 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1937 {
1938         uint32_t eicr;
1939         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1940         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1941
1942         /* clear all cause mask */
1943         ngbe_disable_intr(hw);
1944
1945         /* read the clear-on-read NIC registers here */
1946         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1947         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1948
1949         intr->flags = 0;
1950
1951         /* set flag for async link update */
1952         if (eicr & NGBE_ICRMISC_PHY)
1953                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1954
1955         if (eicr & NGBE_ICRMISC_VFMBX)
1956                 intr->flags |= NGBE_FLAG_MAILBOX;
1957
1958         if (eicr & NGBE_ICRMISC_LNKSEC)
1959                 intr->flags |= NGBE_FLAG_MACSEC;
1960
1961         if (eicr & NGBE_ICRMISC_GPIO)
1962                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1963
1964         return 0;
1965 }
1966
1967 /**
1968  * It gets and then prints the link status.
1969  *
1970  * @param dev
1971  *  Pointer to struct rte_eth_dev.
1972  */
1977 static void
1978 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
1979 {
1980         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1981         struct rte_eth_link link;
1982
1983         rte_eth_linkstatus_get(dev, &link);
1984
1985         if (link.link_status == RTE_ETH_LINK_UP) {
1986                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1987                                         (int)(dev->data->port_id),
1988                                         (unsigned int)link.link_speed,
1989                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1990                                         "full-duplex" : "half-duplex");
1991         } else {
1992                 PMD_INIT_LOG(INFO, "Port %d: Link Down",
1993                                 (int)(dev->data->port_id));
1994         }
1995         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1996                                 pci_dev->addr.domain,
1997                                 pci_dev->addr.bus,
1998                                 pci_dev->addr.devid,
1999                                 pci_dev->addr.function);
2000 }
2001
2002 /**
2003  * It executes link_update once an interrupt has occurred.
2004  *
2005  * @param dev
2006  *  Pointer to struct rte_eth_dev.
2007  *
2008  * @return
2009  *  - On success, zero.
2010  *  - On failure, a negative value.
2011  */
2012 static int
2013 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2014 {
2015         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2016         int64_t timeout;
2017
2018         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2019
2020         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2021                 struct rte_eth_link link;
2022
2023                 /* get the link status before link update, for predicting later */
2024                 rte_eth_linkstatus_get(dev, &link);
2025
2026                 ngbe_dev_link_update(dev, 0);
2027
2028                 /* link is likely coming up */
2029                 if (link.link_status != RTE_ETH_LINK_UP)
2030                         /* handle it 1 sec later, waiting for it to stabilize */
2031                         timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
2032                 /* link is likely going down */
2033                 else
2034                         /* handle it 4 sec later, waiting for it to stabilize */
2035                         timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
2036
2037                 ngbe_dev_link_status_print(dev);
2038                 if (rte_eal_alarm_set(timeout * 1000,
2039                                       ngbe_dev_interrupt_delayed_handler,
2040                                       (void *)dev) < 0) {
2041                         PMD_DRV_LOG(ERR, "Error setting alarm");
2042                 } else {
2043                         /* remember original mask */
2044                         intr->mask_misc_orig = intr->mask_misc;
2045                         /* only disable lsc interrupt */
2046                         intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2047
2048                         intr->mask_orig = intr->mask;
2049                         /* only disable all misc interrupts */
2050                         intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
2051                 }
2052         }
2053
2054         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2055         ngbe_enable_intr(dev);
2056
2057         return 0;
2058 }
2059
2060 /**
2061  * Interrupt handler registered as an alarm callback for delayed handling
2062  * of a specific interrupt, waiting for the NIC state to become stable. As
2063  * the ngbe interrupt state is not stable right after the link goes down,
2064  * it needs to wait 4 seconds to get a stable status.
2065  *
2066  * @param param
2067  *  The address of parameter (struct rte_eth_dev *) registered before.
2068  */
2069 static void
2070 ngbe_dev_interrupt_delayed_handler(void *param)
2071 {
2072         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2073         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2074         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2075         uint32_t eicr;
2076
2077         ngbe_disable_intr(hw);
2078
2079         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2080
2081         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2082                 ngbe_dev_link_update(dev, 0);
2083                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2084                 ngbe_dev_link_status_print(dev);
2085                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2086                                               NULL);
2087         }
2088
2089         if (intr->flags & NGBE_FLAG_MACSEC) {
2090                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2091                                               NULL);
2092                 intr->flags &= ~NGBE_FLAG_MACSEC;
2093         }
2094
2095         /* restore original mask */
2096         intr->mask_misc = intr->mask_misc_orig;
2097         intr->mask_misc_orig = 0;
2098         intr->mask = intr->mask_orig;
2099         intr->mask_orig = 0;
2100
2101         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2102         ngbe_enable_intr(dev);
2103 }
2104
2105 /**
2106  * Interrupt handler triggered by the NIC for handling
2107  * a specific interrupt.
2108  *
2109  * @param param
2110  *  The address of parameter (struct rte_eth_dev *) registered before.
2111  */
2112 static void
2113 ngbe_dev_interrupt_handler(void *param)
2114 {
2115         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2116
2117         ngbe_dev_interrupt_get_status(dev);
2118         ngbe_dev_interrupt_action(dev);
2119 }
2120
2121 static int
2122 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2123 {
2124         struct ngbe_hw *hw = ngbe_dev_hw(dev);
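        /* max frame length = MTU + Ethernet header (14) + CRC (4) + 4,
         * where the extra 4 bytes are assumed to cover one VLAN tag
         */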
2125         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2126         struct rte_eth_dev_data *dev_data = dev->data;
2127
2128         /* If the device is started, refuse an MTU that requires scattered
2129          * packet support when this feature has not been enabled beforehand.
2130          */
2131         if (dev_data->dev_started && !dev_data->scattered_rx &&
2132             (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
2133              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2134                 PMD_INIT_LOG(ERR, "Stop port first.");
2135                 return -EINVAL;
2136         }
2137
2138         if (hw->mode)
2139                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2140                         NGBE_FRAME_SIZE_MAX);
2141         else
2142                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2143                         NGBE_FRMSZ_MAX(frame_size));
2144
2145         return 0;
2146 }
2147
2148 /**
2149  * Set the IVAR registers, mapping interrupt causes to vectors
2150  * @param hw
2151  *  pointer to ngbe_hw struct
2152  * @param direction
2153  *  0 for Rx, 1 for Tx, -1 for other causes
2154  * @param queue
2155  *  queue to map the corresponding interrupt to
2156  * @param msix_vector
2157  *  the vector to map to the corresponding queue
2158  */
2159 void
2160 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2161                    uint8_t queue, uint8_t msix_vector)
2162 {
2163         uint32_t tmp, idx;
2164
2165         if (direction == -1) {
2166                 /* other causes */
2167                 msix_vector |= NGBE_IVARMISC_VLD;
2168                 idx = 0;
2169                 tmp = rd32(hw, NGBE_IVARMISC);
2170                 tmp &= ~(0xFF << idx);
2171                 tmp |= (msix_vector << idx);
2172                 wr32(hw, NGBE_IVARMISC, tmp);
2173         } else {
2174                 /* rx or tx causes */
2175                 /* Workaround for lost ICR */
2176                 idx = ((16 * (queue & 1)) + (8 * direction));
2177                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2178                 tmp &= ~(0xFF << idx);
2179                 tmp |= (msix_vector << idx);
2180                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2181         }
2182 }
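
/*
 * Each 32-bit IVAR register packs four 8-bit entries: two queues per
 * register, with Rx and Tx causes interleaved. A worked example of the
 * index math above:
 *
 *   queue 0, Rx (direction 0): idx = 16*0 + 8*0 = 0   -> bits [7:0]
 *   queue 0, Tx (direction 1): idx = 16*0 + 8*1 = 8   -> bits [15:8]
 *   queue 1, Rx (direction 0): idx = 16*1 + 8*0 = 16  -> bits [23:16]
 *   queue 1, Tx (direction 1): idx = 16*1 + 8*1 = 24  -> bits [31:24]
 */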
2183
2184 /**
2185  * Sets up the hardware to properly generate MSI-X interrupts
2186  * @param dev
2187  *  Pointer to struct rte_eth_dev.
2188  */
2189 static void
2190 ngbe_configure_msix(struct rte_eth_dev *dev)
2191 {
2192         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2193         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2194         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2195         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2196         uint32_t vec = NGBE_MISC_VEC_ID;
2197         uint32_t gpie;
2198
2199         /*
2200          * Don't configure the MSI-X registers if no mapping has been done
2201          * between interrupt vectors and event fds; but if MSI-X has already
2202          * been enabled, auto clean, auto mask and throttling still need to
2203          * be configured.
2204          */
2205         gpie = rd32(hw, NGBE_GPIE);
2206         if (!rte_intr_dp_is_en(intr_handle) &&
2207             !(gpie & NGBE_GPIE_MSIX))
2208                 return;
2209
2210         if (rte_intr_allow_others(intr_handle)) {
2211                 base = NGBE_RX_VEC_START;
2212                 vec = base;
2213         }
2214
2215         /* setup GPIE for MSI-X mode */
2216         gpie = rd32(hw, NGBE_GPIE);
2217         gpie |= NGBE_GPIE_MSIX;
2218         wr32(hw, NGBE_GPIE, gpie);
2219
2220         /* Populate the IVAR table and set the ITR values to the
2221          * corresponding register.
2222          */
2223         if (rte_intr_dp_is_en(intr_handle)) {
2224                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2225                         queue_id++) {
2226                         /* by default, 1:1 mapping */
2227                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
2228                         rte_intr_vec_list_index_set(intr_handle,
2229                                                            queue_id, vec);
2230                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
2231                             - 1)
2232                                 vec++;
2233                 }
2234
2235                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2236         }
2237         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2238                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2239                         | NGBE_ITR_WRDSA);
2240 }
2241
2242 static const struct eth_dev_ops ngbe_eth_dev_ops = {
2243         .dev_configure              = ngbe_dev_configure,
2244         .dev_infos_get              = ngbe_dev_info_get,
2245         .dev_start                  = ngbe_dev_start,
2246         .dev_stop                   = ngbe_dev_stop,
2247         .dev_close                  = ngbe_dev_close,
2248         .dev_reset                  = ngbe_dev_reset,
2249         .promiscuous_enable         = ngbe_dev_promiscuous_enable,
2250         .promiscuous_disable        = ngbe_dev_promiscuous_disable,
2251         .allmulticast_enable        = ngbe_dev_allmulticast_enable,
2252         .allmulticast_disable       = ngbe_dev_allmulticast_disable,
2253         .link_update                = ngbe_dev_link_update,
2254         .stats_get                  = ngbe_dev_stats_get,
2255         .xstats_get                 = ngbe_dev_xstats_get,
2256         .xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
2257         .stats_reset                = ngbe_dev_stats_reset,
2258         .xstats_reset               = ngbe_dev_xstats_reset,
2259         .xstats_get_names           = ngbe_dev_xstats_get_names,
2260         .xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
2261         .fw_version_get             = ngbe_fw_version_get,
2262         .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
2263         .mtu_set                    = ngbe_dev_mtu_set,
2264         .vlan_filter_set            = ngbe_vlan_filter_set,
2265         .vlan_tpid_set              = ngbe_vlan_tpid_set,
2266         .vlan_offload_set           = ngbe_vlan_offload_set,
2267         .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
2268         .rx_queue_start             = ngbe_dev_rx_queue_start,
2269         .rx_queue_stop              = ngbe_dev_rx_queue_stop,
2270         .tx_queue_start             = ngbe_dev_tx_queue_start,
2271         .tx_queue_stop              = ngbe_dev_tx_queue_stop,
2272         .rx_queue_setup             = ngbe_dev_rx_queue_setup,
2273         .rx_queue_release           = ngbe_dev_rx_queue_release,
2274         .tx_queue_setup             = ngbe_dev_tx_queue_setup,
2275         .tx_queue_release           = ngbe_dev_tx_queue_release,
2276         .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
2277         .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
2278 };
2279
2280 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
2281 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
2282 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
2283
2284 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
2285 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
2286
2287 #ifdef RTE_ETHDEV_DEBUG_RX
2288         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
2289 #endif
2290 #ifdef RTE_ETHDEV_DEBUG_TX
2291         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
2292 #endif