net/ngbe: support register dump
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

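/*
 * Register dump tables. Each reg_info entry describes one block of
 * registers to dump; by the convention of ngbe_regs_group.h this is
 * roughly {base address, count, stride, name} (field names here are
 * illustrative, see the header for the authoritative layout). For
 * example, {NGBE_RST, 1, 1, "NGBE_RST"} names a single register at
 * NGBE_RST. An all-zero entry terminates each table.
 */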
static const struct reg_info ngbe_regs_general[] = {
        {NGBE_RST, 1, 1, "NGBE_RST"},
        {NGBE_STAT, 1, 1, "NGBE_STAT"},
        {NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
        {NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
        {NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
        {NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
        {0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
                                ngbe_regs_general,
                                ngbe_regs_nvm,
                                ngbe_regs_interrupt,
                                ngbe_regs_fctl_others,
                                ngbe_regs_rxdma,
                                ngbe_regs_rx,
                                ngbe_regs_tx,
                                ngbe_regs_wakeup,
                                ngbe_regs_mac,
                                ngbe_regs_diagnostic,
                                NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
                                        uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
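
/*
 * Worked example (illustrative, assuming 32-bit bitmap words): with
 * sizeof((h)->bitmap[0]) * NBBY equal to 32, queue q = 37 gives
 * idx = 37 / 32 = 1 and bit = 37 % 32 = 5, so NGBE_SET_HWSTRIP sets
 * bit 5 of bitmap[1] and NGBE_GET_HWSTRIP reads it back as 0 or 1.
 */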

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_TXD_ALIGN,
        .nb_seg_max = NGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
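/*
 * For illustration: HW_XSTAT(rx_packets) expands to
 * {"rx_packets", offsetof(struct ngbe_hw_stats, rx_packets)},
 * i.e. the stat name plus the field offset later used to read the
 * value back out of struct ngbe_hw_stats.
 */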
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
        /* MNG RxTx */
        HW_XSTAT(mng_bmc2host_packets),
        HW_XSTAT(mng_host2bmc_packets),
        /* Basic RxTx */
        HW_XSTAT(rx_packets),
        HW_XSTAT(tx_packets),
        HW_XSTAT(rx_bytes),
        HW_XSTAT(tx_bytes),
        HW_XSTAT(rx_total_bytes),
        HW_XSTAT(rx_total_packets),
        HW_XSTAT(tx_total_packets),
        HW_XSTAT(rx_total_missed_packets),
        HW_XSTAT(rx_broadcast_packets),
        HW_XSTAT(rx_multicast_packets),
        HW_XSTAT(rx_management_packets),
        HW_XSTAT(tx_management_packets),
        HW_XSTAT(rx_management_dropped),

        /* Basic Error */
        HW_XSTAT(rx_crc_errors),
        HW_XSTAT(rx_illegal_byte_errors),
        HW_XSTAT(rx_error_bytes),
        HW_XSTAT(rx_mac_short_packet_dropped),
        HW_XSTAT(rx_length_errors),
        HW_XSTAT(rx_undersize_errors),
        HW_XSTAT(rx_fragment_errors),
        HW_XSTAT(rx_oversize_errors),
        HW_XSTAT(rx_jabber_errors),
        HW_XSTAT(rx_l3_l4_xsum_error),
        HW_XSTAT(mac_local_errors),
        HW_XSTAT(mac_remote_errors),

        /* MACSEC */
        HW_XSTAT(tx_macsec_pkts_untagged),
        HW_XSTAT(tx_macsec_pkts_encrypted),
        HW_XSTAT(tx_macsec_pkts_protected),
        HW_XSTAT(tx_macsec_octets_encrypted),
        HW_XSTAT(tx_macsec_octets_protected),
        HW_XSTAT(rx_macsec_pkts_untagged),
        HW_XSTAT(rx_macsec_pkts_badtag),
        HW_XSTAT(rx_macsec_pkts_nosci),
        HW_XSTAT(rx_macsec_pkts_unknownsci),
        HW_XSTAT(rx_macsec_octets_decrypted),
        HW_XSTAT(rx_macsec_octets_validated),
        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
        HW_XSTAT(rx_macsec_sc_pkts_delayed),
        HW_XSTAT(rx_macsec_sc_pkts_late),
        HW_XSTAT(rx_macsec_sa_pkts_ok),
        HW_XSTAT(rx_macsec_sa_pkts_invalid),
        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

        /* MAC RxTx */
        HW_XSTAT(rx_size_64_packets),
        HW_XSTAT(rx_size_65_to_127_packets),
        HW_XSTAT(rx_size_128_to_255_packets),
        HW_XSTAT(rx_size_256_to_511_packets),
        HW_XSTAT(rx_size_512_to_1023_packets),
        HW_XSTAT(rx_size_1024_to_max_packets),
        HW_XSTAT(tx_size_64_packets),
        HW_XSTAT(tx_size_65_to_127_packets),
        HW_XSTAT(tx_size_128_to_255_packets),
        HW_XSTAT(tx_size_256_to_511_packets),
        HW_XSTAT(tx_size_512_to_1023_packets),
        HW_XSTAT(tx_size_1024_to_max_packets),

        /* Flow Control */
        HW_XSTAT(tx_xon_packets),
        HW_XSTAT(rx_xon_packets),
        HW_XSTAT(tx_xoff_packets),
        HW_XSTAT(rx_xoff_packets),

        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
                           sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
        QP_XSTAT(rx_qp_packets),
        QP_XSTAT(tx_qp_packets),
        QP_XSTAT(rx_qp_bytes),
        QP_XSTAT(tx_qp_bytes),
        QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
                           sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = hw->mac.reset_hw(hw);

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        if (status == NGBE_ERR_SFP_NOT_PRESENT)
                status = 0;
        return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);

        wr32(hw, NGBE_IENMISC, intr->mask_misc);
        wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
        ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
        ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
        uint16_t mask;

        /*
         * These locks are trickier since they are common to all ports; but
         * swfw_sync retries for long enough (1s) to be almost sure that,
         * if the lock cannot be taken, it is due to an improper hold of
         * the semaphore.
         */
        mask = NGBE_MNGSEM_SWPHY |
               NGBE_MNGSEM_SWMBX |
               NGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        int err, ret;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ngbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

        /*
         * For secondary processes, we don't initialize any further as the
         * primary has already done this work. Only check whether we need a
         * different Rx and Tx function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ngbe_tx_queue *txq;
                /* The Tx function in the primary process is set by the last
                 * queue initialized; Tx queues may not have been initialized
                 * by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        ngbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default Tx function if we get here */
                        PMD_INIT_LOG(NOTICE,
                                "No Tx queues configured yet. Using default Tx function.");
                }

                ngbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->sub_system_id = pci_dev->id.subsystem_device_id;
        ngbe_map_device_id(hw);
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
                NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = ngbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = ngbe_fc_full;
        hw->fc.current_mode = ngbe_fc_full;
        hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
        hw->fc.low_water = NGBE_FC_XON_LOTH;
        hw->fc.high_water = NGBE_FC_XOFF_HITH;
        hw->fc.send_xon = 1;

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, NULL);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* Reset the hw statistics */
        ngbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ngbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
                        RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        /* initialize PF if max_vfs not zero */
        ret = ngbe_pf_host_init(eth_dev);
        if (ret) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                rte_free(eth_dev->data->hash_mac_addrs);
                eth_dev->data->hash_mac_addrs = NULL;
                return ret;
        }

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                        (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   ngbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable supported interrupts */
        ngbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ngbe_dev_close(eth_dev);

        return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct ngbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (ethdev == NULL)
                return 0;

        return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
        .id_table = pci_id_ngbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_ngbe_pci_probe,
        .remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
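        /*
         * Worked example: for vlan_id 100, vid_idx = (100 >> 5) & 0x7F = 3
         * and vid_bit = 1 << (100 & 0x1F) = 1 << 4, so VLAN 100 is bit 4
         * of VLAN table register NGBE_VLANTBL(3).
         */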
        vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_rx_queue *rxq;
        bool restart;
        uint32_t rxcfg, rxbal, rxbah;

        if (on)
                ngbe_vlan_hw_strip_enable(dev, queue);
        else
                ngbe_vlan_hw_strip_disable(dev, queue);

        rxq = dev->data->rx_queues[queue];
        rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        !(rxcfg & NGBE_RXCFG_VLAN);
                rxcfg |= NGBE_RXCFG_VLAN;
        } else {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        (rxcfg & NGBE_RXCFG_VLAN);
                rxcfg &= ~NGBE_RXCFG_VLAN;
        }
        rxcfg &= ~NGBE_RXCFG_ENA;
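
        /*
         * Note: the VLAN-strip bit is only changed while the ring is
         * disabled. "restart" is true only when the queue is currently
         * enabled and the strip setting actually changes; in that case the
         * queue is stopped, the saved base-address registers and the new
         * RXCFG value are written back, and the queue is restarted.
         */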

        if (restart) {
                /* set vlan strip for ring */
                ngbe_dev_rx_queue_stop(dev, queue);
                wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
                wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
                ngbe_dev_rx_queue_start(dev, queue);
        }
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type,
                    uint16_t tpid)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        int ret = 0;
        uint32_t portctrl, vlan_ext, qinq;

        portctrl = rd32(hw, NGBE_PORTCTL);

        vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
        switch (vlan_type) {
        case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                } else {
                        ret = -ENOTSUP;
                        PMD_DRV_LOG(ERR,
                                "Inner type is not supported by single VLAN");
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_LSB_MASK,
                                NGBE_TAGTPID_LSB(tpid));
                }
                break;
        case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16 bits are valid */
                        wr32m(hw, NGBE_EXTAG,
                                NGBE_EXTAG_VLAN_MASK,
                                NGBE_EXTAG_VLAN(tpid));
                } else {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_MSB_MASK,
                                NGBE_TAGTPID_MSB(tpid));
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                return -EINVAL;
        }

        return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vlnctrl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_CFIENA;
        vlnctrl |= NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < NGBE_VFTA_SIZE; i++)
                wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
        struct ngbe_rx_queue *rxq;

        if (queue >= NGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
                NGBE_SET_HWSTRIP(hwstrip, queue);
        else
                NGBE_CLEAR_HWSTRIP(hwstrip, queue);

        if (queue >= dev->data->nb_rx_queues)
                return;

        rxq = dev->data->rx_queues[queue];

        if (on) {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl &= ~NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting per queue for HW strip */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl |= NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting per queue for HW strip */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_VLANEXT;
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
        struct ngbe_rx_queue *rxq;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ngbe_vlan_hw_strip_enable(dev, i);
                else
                        ngbe_vlan_hw_strip_disable(dev, i);
        }
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
        uint16_t i;
        struct rte_eth_rxmode *rxmode;
        struct ngbe_rx_queue *rxq;

        if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;

        if (mask & RTE_ETH_VLAN_STRIP_MASK)
                ngbe_vlan_hw_strip_config(dev);

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ngbe_vlan_hw_filter_enable(dev);
                else
                        ngbe_vlan_hw_filter_disable(dev);
        }

        if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        ngbe_vlan_hw_extend_enable(dev);
                else
                        ngbe_vlan_hw_extend_disable(dev);
        }

        if (mask & RTE_ETH_QINQ_STRIP_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
                        ngbe_qinq_hw_strip_enable(dev);
                else
                        ngbe_qinq_hw_strip_disable(dev);
        }

        return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        ngbe_config_vlan_strip_on_all_queues(dev, mask);

        ngbe_vlan_offload_config(dev, mask);

        return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

        /* set flag to update link status after init */
        intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue doesn't meet the bulk
         * allocation preconditions, it will be reset to FALSE.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

        wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
        wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
        wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
        if (hw->phy.type == ngbe_phy_yt8521s_sfi)
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
        else
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

        intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
        uint32_t *link_speeds;

        PMD_INIT_FUNC_TRACE();

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        /* stop adapter */
        hw->adapter_stopped = 0;
        ngbe_stop_hw(hw);

        /* reinitialize adapter; this calls reset and start */
        hw->nb_rx_queues = dev->data->nb_rx_queues;
        hw->nb_tx_queues = dev->data->nb_tx_queues;
        status = ngbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;

        /* configure PF module if SRIOV enabled */
        ngbe_pf_host_configure(dev);

        ngbe_dev_phy_intr_setup(dev);

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle)) {
                if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                                   dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate %d rx_queues intr_vec",
                                     dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure MSI-X for sleep until Rx interrupt */
        ngbe_configure_msix(dev);

        /* initialize transmission unit */
        ngbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ngbe_dev_rx_init(dev);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
                goto error;
        }

        mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
                RTE_ETH_VLAN_EXTEND_MASK;
        err = ngbe_vlan_offload_config(dev, mask);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }

        ngbe_configure_port(dev);

        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }

        /* Skip link setup if loopback mode is enabled. */
        if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
                goto skip_link_setup;

        err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err != 0)
                goto error;
        dev->data->dev_link.link_status = link_up;

        link_speeds = &dev->data->dev_conf.link_speeds;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
                negotiate = true;

        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
        if (err != 0)
                goto error;

        allowed_speeds = 0;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

        if (*link_speeds & ~allowed_speeds) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }

        speed = 0x0;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = hw->mac.default_speeds;
        } else {
                if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= NGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= NGBE_LINK_SPEED_100M_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
                        speed |= NGBE_LINK_SPEED_10M_FULL;
        }
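
        /*
         * For example, link_speeds = RTE_ETH_LINK_SPEED_1G |
         * RTE_ETH_LINK_SPEED_100M yields speed = NGBE_LINK_SPEED_1GB_FULL |
         * NGBE_LINK_SPEED_100M_FULL, while AUTONEG falls back to
         * hw->mac.default_speeds above.
         */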

        hw->phy.init_hw(hw);
        err = hw->mac.setup_link(hw, speed, link_up);
        if (err != 0)
                goto error;

skip_link_setup:

        if (rte_intr_allow_others(intr_handle)) {
                ngbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        ngbe_dev_lsc_interrupt_setup(dev, TRUE);
                else
                        ngbe_dev_lsc_interrupt_setup(dev, FALSE);
                ngbe_dev_macsec_interrupt_setup(dev);
                ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             ngbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO,
                                     "LSC won't enable because of no intr multiplex");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
                ngbe_dev_rxq_interrupt_setup(dev);

        /* enable UIO/VFIO intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* re-enable interrupts, since they were disabled during HW reset */
        ngbe_enable_intr(dev);

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* gpio0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, 0);
        }

        /*
         * Update link status right before return, because it may
         * start link configuration process in a separate thread.
         */
        ngbe_dev_link_update(dev, 0);

        ngbe_read_stats_registers(hw, hw_stats);
        hw->offset_loaded = 1;

        return 0;

error:
        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
        ngbe_dev_clear_queues(dev);
        return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int vf;

        if (hw->adapter_stopped)
                return 0;

        PMD_INIT_FUNC_TRACE();

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* gpio0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
        }

        /* disable interrupts */
        ngbe_disable_intr(hw);

        /* reset the NIC */
        ngbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;

        /* stop adapter */
        ngbe_stop_hw(hw);

        for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
                vfinfo[vf].clear_to_send = false;

        ngbe_dev_clear_queues(dev);

        /* Clear stored conf */
        dev->data->scattered_rx = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* restore the default interrupt handler */
                rte_intr_callback_register(intr_handle,
                                           ngbe_dev_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_intr_vec_list_free(intr_handle);

        adapter->rss_reta_updated = 0;

        hw->adapter_stopped = true;
        dev->data->dev_started = 0;

        return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        ngbe_pf_reset_hw(hw);

        ngbe_dev_stop(dev);

        ngbe_dev_free_queues(dev);

        /* reprogram the RAR[0] in case user changed it. */
        ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                ngbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + NGBE_LINK_UP_TIME));

        /* uninitialize PF if max_vfs not zero */
        ngbe_pf_host_uninit(dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        /* When a DPDK PMD PF begins to reset a PF port, it should notify all
         * its VFs to make them align with it. The detailed notification
         * mechanism is PMD specific. As for the ngbe PF, it is rather complex.
         * To avoid unexpected behavior in VFs, reset of a PF with SR-IOV
         * activated is currently not supported. It might be supported later.
         */
        if (dev->data->sriov.active)
                return -ENOTSUP;

        ret = eth_ngbe_dev_uninit(dev);
        if (ret != 0)
                return ret;

        ret = eth_ngbe_dev_init(dev, NULL);

        return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
        {                                                       \
                uint32_t current_counter = rd32(hw, reg);       \
                if (current_counter < last_counter)             \
                        current_counter += 0x100000000LL;       \
                if (!hw->offset_loaded)                         \
                        last_counter = current_counter;         \
                counter = current_counter - last_counter;       \
                counter &= 0xFFFFFFFFLL;                        \
        }

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        {                                                                \
                uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
                uint64_t current_counter_msb = rd32(hw, reg_msb);        \
                uint64_t current_counter = (current_counter_msb << 32) | \
                        current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        current_counter += 0x1000000000LL;               \
                if (!hw->offset_loaded)                                  \
                        last_counter = current_counter;                  \
                counter = current_counter - last_counter;                \
                counter &= 0xFFFFFFFFFLL;                                \
        }
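
/*
 * Worked example for the 32-bit variant: if the previous snapshot was
 * last_counter = 0xFFFFFFF0 and the register now reads 0x10, the
 * current counter is widened to 0x100000010, so the reported delta is
 * 0x20 instead of a huge bogus value. The 36-bit variant applies the
 * same wraparound handling to a counter split across LSB/MSB registers.
 */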

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
                           struct ngbe_hw_stats *hw_stats)
{
        unsigned int i;

        /* QP Stats */
        for (i = 0; i < hw->nb_rx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
                        hw->qp_last[i].rx_qp_packets,
                        hw_stats->qp[i].rx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
                        hw->qp_last[i].rx_qp_bytes,
                        hw_stats->qp[i].rx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
                        hw->qp_last[i].rx_qp_mc_packets,
                        hw_stats->qp[i].rx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
                        hw->qp_last[i].rx_qp_bc_packets,
                        hw_stats->qp[i].rx_qp_bc_packets);
        }

        for (i = 0; i < hw->nb_tx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
                        hw->qp_last[i].tx_qp_packets,
                        hw_stats->qp[i].tx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
                        hw->qp_last[i].tx_qp_bytes,
                        hw_stats->qp[i].tx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
                        hw->qp_last[i].tx_qp_mc_packets,
                        hw_stats->qp[i].tx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
                        hw->qp_last[i].tx_qp_bc_packets,
                        hw_stats->qp[i].tx_qp_bc_packets);
        }

        /* PB Stats */
        hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
        hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
        hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
        hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
        hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
        hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

        hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
        hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

        /* DMA Stats */
        hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
        hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
        hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
        hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
        hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
        hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

        /* MAC Stats */
        hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
        hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
        hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

        hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
        hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
        hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

        hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
        hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

        hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
        hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
        hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
        hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
        hw_stats->rx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACRX512TO1023L);
        hw_stats->rx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACRX1024TOMAXL);
        hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
        hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
        hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
        hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
        hw_stats->tx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACTX512TO1023L);
        hw_stats->tx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACTX1024TOMAXL);

        hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
        hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
        hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

        /* MNG Stats */
        hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
        hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
        hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
        hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

        /* MACsec Stats */
        hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
        hw_stats->tx_macsec_pkts_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCPKT);
        hw_stats->tx_macsec_pkts_protected +=
                        rd32(hw, NGBE_LSECTX_PROTPKT);
        hw_stats->tx_macsec_octets_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCOCT);
        hw_stats->tx_macsec_octets_protected +=
                        rd32(hw, NGBE_LSECTX_PROTOCT);
        hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
        hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
        hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
        hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
        hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
        hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
        hw_stats->rx_macsec_sc_pkts_unchecked +=
                        rd32(hw, NGBE_LSECRX_UNCHKPKT);
        hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
        hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
        for (i = 0; i < 2; i++) {
                hw_stats->rx_macsec_sa_pkts_ok +=
                        rd32(hw, NGBE_LSECRX_OKPKT(i));
                hw_stats->rx_macsec_sa_pkts_invalid +=
                        rd32(hw, NGBE_LSECRX_INVPKT(i));
                hw_stats->rx_macsec_sa_pkts_notvalid +=
                        rd32(hw, NGBE_LSECRX_BADPKT(i));
        }
        for (i = 0; i < 4; i++) {
                hw_stats->rx_macsec_sa_pkts_unusedsa +=
                        rd32(hw, NGBE_LSECRX_INVSAPKT(i));
                hw_stats->rx_macsec_sa_pkts_notusingsa +=
                        rd32(hw, NGBE_LSECRX_BADSAPKT(i));
        }
        hw_stats->rx_total_missed_packets =
                        hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct ngbe_stat_mappings *stat_mappings =
                        NGBE_DEV_STAT_MAPPINGS(dev);
        uint32_t i, j;

        ngbe_read_stats_registers(hw, hw_stats);

        if (stats == NULL)
                return -EINVAL;

        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = hw_stats->rx_packets;
        stats->ibytes = hw_stats->rx_bytes;
        stats->opackets = hw_stats->tx_packets;
        stats->obytes = hw_stats->tx_bytes;

        memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
        memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
        memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
        memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
        memset(&stats->q_errors, 0, sizeof(stats->q_errors));
        for (i = 0; i < NGBE_MAX_QP; i++) {
                uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
                uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
                uint32_t q_map;
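                /*
                 * Each queue's stat-counter mapping is an 8-bit field in
                 * the rqsm/tqsm registers (hence the "* 8" offset); the
                 * field selects which of the RTE_ETHDEV_QUEUE_STAT_CNTRS
                 * software counters queue i is accumulated into.
                 */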
1446
1447                 q_map = (stat_mappings->rqsm[n] >> offset)
1448                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1449                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1450                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1451                 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1452                 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1453
1454                 q_map = (stat_mappings->tqsm[n] >> offset)
1455                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1456                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1457                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1458                 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1459                 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1460         }
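        /*
         * Mapping sketch for the loop above: each 32-bit RQSM/TQSM register
         * packs NB_QMAP_FIELDS_PER_QSM_REG 8-bit map fields, so queue i uses
         * register i / NB_QMAP_FIELDS_PER_QSM_REG at bit offset
         * 8 * (i % NB_QMAP_FIELDS_PER_QSM_REG). The extracted field selects
         * which of the RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue counters in
         * rte_eth_stats the hardware queue is folded into; out-of-range
         * values are wrapped with a modulo as a defensive measure.
         */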
1461
1462         /* Rx Errors */
1463         stats->imissed  = hw_stats->rx_total_missed_packets +
1464                           hw_stats->rx_dma_drop;
1465         stats->ierrors  = hw_stats->rx_crc_errors +
1466                           hw_stats->rx_mac_short_packet_dropped +
1467                           hw_stats->rx_length_errors +
1468                           hw_stats->rx_undersize_errors +
1469                           hw_stats->rx_oversize_errors +
1470                           hw_stats->rx_illegal_byte_errors +
1471                           hw_stats->rx_error_bytes +
1472                           hw_stats->rx_fragment_errors;
1473
1474         /* Tx Errors */
1475         stats->oerrors  = 0;
1476         return 0;
1477 }
1478
1479 static int
1480 ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1481 {
1482         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1483         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1484
1485         /* HW registers are cleared on read */
1486         hw->offset_loaded = 0;
1487         ngbe_dev_stats_get(dev, NULL);
1488         hw->offset_loaded = 1;
1489
1490         /* Reset software totals */
1491         memset(hw_stats, 0, sizeof(*hw_stats));
1492
1493         return 0;
1494 }
1495
1496 /* This function calculates the number of xstats based on the current config */
1497 static unsigned
1498 ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1499 {
1500         int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1501         return NGBE_NB_HW_STATS +
1502                NGBE_NB_QP_STATS * nb_queues;
1503 }
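/*
 * Worked example (symbolic, since the actual NGBE_NB_* values live in the
 * driver headers): with 4 Rx and 2 Tx queues configured, the xstats count is
 * NGBE_NB_HW_STATS + NGBE_NB_QP_STATS * 4, i.e. one block of device-wide
 * stats plus one block of queue-pair stats per max(rx, tx) queue.
 */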
1504
1505 static inline int
1506 ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1507 {
1508         int nb, st;
1509
1510         /* Extended stats from ngbe_hw_stats */
1511         if (id < NGBE_NB_HW_STATS) {
1512                 snprintf(name, size, "[hw]%s",
1513                         rte_ngbe_stats_strings[id].name);
1514                 return 0;
1515         }
1516         id -= NGBE_NB_HW_STATS;
1517
1518         /* Queue Stats */
1519         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1520                 nb = id / NGBE_NB_QP_STATS;
1521                 st = id % NGBE_NB_QP_STATS;
1522                 snprintf(name, size, "[q%u]%s", nb,
1523                         rte_ngbe_qp_strings[st].name);
1524                 return 0;
1525         }
1526         id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1527
1528         return -(int)(id + 1);
1529 }
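/*
 * Resulting xstats id layout, as derived from the range checks above:
 *   [0, NGBE_NB_HW_STATS)                                -> "[hw]<name>"
 *   [NGBE_NB_HW_STATS,
 *    NGBE_NB_HW_STATS + NGBE_NB_QP_STATS * NGBE_MAX_QP)  -> "[q<n>]<name>"
 * Any id past the queue block yields the negative error return.
 */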
1530
1531 static inline int
1532 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1533 {
1534         int nb, st;
1535
1536         /* Extended stats from ngbe_hw_stats */
1537         if (id < NGBE_NB_HW_STATS) {
1538                 *offset = rte_ngbe_stats_strings[id].offset;
1539                 return 0;
1540         }
1541         id -= NGBE_NB_HW_STATS;
1542
1543         /* Queue Stats */
1544         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1545                 nb = id / NGBE_NB_QP_STATS;
1546                 st = id % NGBE_NB_QP_STATS;
1547                 *offset = rte_ngbe_qp_strings[st].offset +
1548                         nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1549                 return 0;
1550         }
1551
1552         return -1;
1553 }
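/*
 * The queue-stat offset computation above assumes that struct ngbe_hw_stats
 * keeps its per-queue counters as a contiguous array (the qp[] array used in
 * ngbe_dev_stats_get()) of NGBE_NB_QP_STATS uint64_t fields per queue, so
 * queue nb's copy of a counter sits NGBE_NB_QP_STATS * sizeof(uint64_t)
 * bytes after queue nb-1's.
 */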
1554
1555 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1556         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1557 {
1558         unsigned int i, count;
1559
1560         count = ngbe_xstats_calc_num(dev);
1561         if (xstats_names == NULL)
1562                 return count;
1563
1564         /* Note: limit >= cnt_stats checked upstream
1565          * in rte_eth_xstats_get_names()
1566          */
1567         limit = min(limit, count);
1568
1569         /* Extended stats from ngbe_hw_stats */
1570         for (i = 0; i < limit; i++) {
1571                 if (ngbe_get_name_by_id(i, xstats_names[i].name,
1572                         sizeof(xstats_names[i].name))) {
1573                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1574                         break;
1575                 }
1576         }
1577
1578         return i;
1579 }
1580
1581 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1582         const uint64_t *ids,
1583         struct rte_eth_xstat_name *xstats_names,
1584         unsigned int limit)
1585 {
1586         unsigned int i;
1587
1588         if (ids == NULL)
1589                 return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1590
1591         for (i = 0; i < limit; i++) {
1592                 if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1593                                 sizeof(xstats_names[i].name))) {
1594                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
1595                         return -1;
1596                 }
1597         }
1598
1599         return i;
1600 }
1601
1602 static int
1603 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1604                                          unsigned int limit)
1605 {
1606         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1607         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1608         unsigned int i, count;
1609
1610         ngbe_read_stats_registers(hw, hw_stats);
1611
1612         /* If this is a reset, xstats is NULL and we have already cleared
1613          * the registers by reading them.
1614          */
1615         count = ngbe_xstats_calc_num(dev);
1616         if (xstats == NULL)
1617                 return count;
1618
1619         limit = min(limit, ngbe_xstats_calc_num(dev));
1620
1621         /* Extended stats from ngbe_hw_stats */
1622         for (i = 0; i < limit; i++) {
1623                 uint32_t offset = 0;
1624
1625                 if (ngbe_get_offset_by_id(i, &offset)) {
1626                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1627                         break;
1628                 }
1629                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1630                 xstats[i].id = i;
1631         }
1632
1633         return i;
1634 }
1635
1636 static int
1637 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1638                                          unsigned int limit)
1639 {
1640         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1641         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1642         unsigned int i, count;
1643
1644         ngbe_read_stats_registers(hw, hw_stats);
1645
1646         /* If this is a reset, values is NULL and we have already cleared
1647          * the registers by reading them.
1648          */
1649         count = ngbe_xstats_calc_num(dev);
1650         if (values == NULL)
1651                 return count;
1652
1653         limit = min(limit, ngbe_xstats_calc_num(dev));
1654
1655         /* Extended stats from ngbe_hw_stats */
1656         for (i = 0; i < limit; i++) {
1657                 uint32_t offset;
1658
1659                 if (ngbe_get_offset_by_id(i, &offset)) {
1660                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1661                         break;
1662                 }
1663                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1664         }
1665
1666         return i;
1667 }
1668
1669 static int
1670 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1671                 uint64_t *values, unsigned int limit)
1672 {
1673         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1674         unsigned int i;
1675
1676         if (ids == NULL)
1677                 return ngbe_dev_xstats_get_(dev, values, limit);
1678
1679         for (i = 0; i < limit; i++) {
1680                 uint32_t offset;
1681
1682                 if (ngbe_get_offset_by_id(ids[i], &offset)) {
1683                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
1684                         break;
1685                 }
1686                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1687         }
1688
1689         return i;
1690 }
1691
1692 static int
1693 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1694 {
1695         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1696         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1697
1698         /* HW registers are cleared on read */
1699         hw->offset_loaded = 0;
1700         ngbe_read_stats_registers(hw, hw_stats);
1701         hw->offset_loaded = 1;
1702
1703         /* Reset software totals */
1704         memset(hw_stats, 0, sizeof(*hw_stats));
1705
1706         return 0;
1707 }
1708
1709 static int
1710 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1711 {
1712         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1713         int ret;
1714
1715         ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1716
1717         if (ret < 0)
1718                 return -EINVAL;
1719
1720         ret += 1; /* add the size of '\0' */
1721         if (fw_size < (size_t)ret)
1722                 return ret;
1723
1724         return 0;
1725 }
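/*
 * Return-convention sketch: snprintf() reports the number of characters it
 * would have written excluding the trailing '\0', so ret + 1 is the buffer
 * size the caller actually needs. Following the ethdev fw_version_get()
 * contract, that required size is returned when fw_size is too small and 0
 * is returned when the version string fit.
 */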
1726
1727 static int
1728 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1729 {
1730         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1731         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1732
1733         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1734         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1735         dev_info->min_rx_bufsize = 1024;
1736         dev_info->max_rx_pktlen = 15872;
1737         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1738         dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1739         dev_info->max_vfs = pci_dev->max_vfs;
1740         dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1741         dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1742                                      dev_info->rx_queue_offload_capa);
1743         dev_info->tx_queue_offload_capa = 0;
1744         dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1745
1746         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1747                 .rx_thresh = {
1748                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1749                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1750                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1751                 },
1752                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1753                 .rx_drop_en = 0,
1754                 .offloads = 0,
1755         };
1756
1757         dev_info->default_txconf = (struct rte_eth_txconf) {
1758                 .tx_thresh = {
1759                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1760                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1761                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1762                 },
1763                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1764                 .offloads = 0,
1765         };
1766
1767         dev_info->rx_desc_lim = rx_desc_lim;
1768         dev_info->tx_desc_lim = tx_desc_lim;
1769
1770         dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1771         dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1772         dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1773
1774         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1775                                 RTE_ETH_LINK_SPEED_10M;
1776
1777         /* Driver-preferred Rx/Tx parameters */
1778         dev_info->default_rxportconf.burst_size = 32;
1779         dev_info->default_txportconf.burst_size = 32;
1780         dev_info->default_rxportconf.nb_queues = 1;
1781         dev_info->default_txportconf.nb_queues = 1;
1782         dev_info->default_rxportconf.ring_size = 256;
1783         dev_info->default_txportconf.ring_size = 256;
1784
1785         return 0;
1786 }
1787
1788 const uint32_t *
1789 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1790 {
1791         if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1792             dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1793             dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1794             dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1795                 return ngbe_get_supported_ptypes();
1796
1797         return NULL;
1798 }
1799
1800 /* return 0 means link status changed, -1 means not changed */
1801 int
1802 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1803                             int wait_to_complete)
1804 {
1805         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1806         struct rte_eth_link link;
1807         u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1808         u32 lan_speed = 0;
1809         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1810         bool link_up;
1811         int err;
1812         int wait = 1;
1813
1814         memset(&link, 0, sizeof(link));
1815         link.link_status = RTE_ETH_LINK_DOWN;
1816         link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1817         link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1818         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1819                         ~RTE_ETH_LINK_SPEED_AUTONEG);
1820
1821         hw->mac.get_link_status = true;
1822
1823         if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1824                 return rte_eth_linkstatus_set(dev, &link);
1825
1826         /* check if it needs to wait to complete, if lsc interrupt is enabled */
1827         /* skip waiting if completion was not requested or the LSC interrupt is enabled */
1828                 wait = 0;
1829
1830         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1831         if (err != 0) {
1832                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1833                 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1834                 return rte_eth_linkstatus_set(dev, &link);
1835         }
1836
1837         if (!link_up)
1838                 return rte_eth_linkstatus_set(dev, &link);
1839
1840         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1841         link.link_status = RTE_ETH_LINK_UP;
1842         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1843
1844         switch (link_speed) {
1845         default:
1846         case NGBE_LINK_SPEED_UNKNOWN:
1847                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1848                 break;
1849
1850         case NGBE_LINK_SPEED_10M_FULL:
1851                 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1852                 lan_speed = 0;
1853                 break;
1854
1855         case NGBE_LINK_SPEED_100M_FULL:
1856                 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1857                 lan_speed = 1;
1858                 break;
1859
1860         case NGBE_LINK_SPEED_1GB_FULL:
1861                 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1862                 lan_speed = 2;
1863                 break;
1864         }
1865
1866         if (hw->is_pf) {
1867                 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1868                 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1869                                 NGBE_LINK_SPEED_100M_FULL |
1870                                 NGBE_LINK_SPEED_10M_FULL)) {
1871                         wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1872                                 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1873                 }
1874         }
1875
1876         return rte_eth_linkstatus_set(dev, &link);
1877 }
1878
1879 static int
1880 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1881 {
1882         return ngbe_dev_link_update_share(dev, wait_to_complete);
1883 }
1884
1885 static int
1886 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1887 {
1888         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1889         uint32_t fctrl;
1890
1891         fctrl = rd32(hw, NGBE_PSRCTL);
1892         fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1893         wr32(hw, NGBE_PSRCTL, fctrl);
1894
1895         return 0;
1896 }
1897
1898 static int
1899 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1900 {
1901         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1902         uint32_t fctrl;
1903
1904         fctrl = rd32(hw, NGBE_PSRCTL);
1905         fctrl &= (~NGBE_PSRCTL_UCP);
1906         if (dev->data->all_multicast == 1)
1907                 fctrl |= NGBE_PSRCTL_MCP;
1908         else
1909                 fctrl &= (~NGBE_PSRCTL_MCP);
1910         wr32(hw, NGBE_PSRCTL, fctrl);
1911
1912         return 0;
1913 }
1914
1915 static int
1916 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1917 {
1918         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1919         uint32_t fctrl;
1920
1921         fctrl = rd32(hw, NGBE_PSRCTL);
1922         fctrl |= NGBE_PSRCTL_MCP;
1923         wr32(hw, NGBE_PSRCTL, fctrl);
1924
1925         return 0;
1926 }
1927
1928 static int
1929 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1930 {
1931         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1932         uint32_t fctrl;
1933
1934         if (dev->data->promiscuous == 1)
1935                 return 0; /* must remain in all_multicast mode */
1936
1937         fctrl = rd32(hw, NGBE_PSRCTL);
1938         fctrl &= (~NGBE_PSRCTL_MCP);
1939         wr32(hw, NGBE_PSRCTL, fctrl);
1940
1941         return 0;
1942 }
1943
1944 /**
1945  * It clears the interrupt causes and enables the interrupt.
1946  * It will be called only once during NIC initialization.
1947  *
1948  * @param dev
1949  *  Pointer to struct rte_eth_dev.
1950  * @param on
1951  *  Enable or Disable.
1952  *
1953  * @return
1954  *  - On success, zero.
1955  *  - On failure, a negative value.
1956  */
1957 static int
1958 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1959 {
1960         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1961
1962         ngbe_dev_link_status_print(dev);
1963         if (on != 0) {
1964                 intr->mask_misc |= NGBE_ICRMISC_PHY;
1965                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1966         } else {
1967                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1968                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1969         }
1970
1971         return 0;
1972 }
1973
1974 /**
1975  * It clears the interrupt causes and enables the interrupt.
1976  * It will be called only once during NIC initialization.
1977  *
1978  * @param dev
1979  *  Pointer to struct rte_eth_dev.
1980  *
1981  * @return
1982  *  - On success, zero.
1983  *  - On failure, a negative value.
1984  */
1985 static int
1986 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1987 {
1988         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1989         u64 mask;
1990
1991         mask = NGBE_ICR_MASK;
1992         mask &= (1ULL << NGBE_MISC_VEC_ID);
1993         intr->mask |= mask;
1994         intr->mask_misc |= NGBE_ICRMISC_GPIO;
1995
1996         return 0;
1997 }
1998
1999 /**
2000  * It clears the interrupt causes and enables the interrupt.
2001  * It will be called only once during NIC initialization.
2002  *
2003  * @param dev
2004  *  Pointer to struct rte_eth_dev.
2005  *
2006  * @return
2007  *  - On success, zero.
2008  *  - On failure, a negative value.
2009  */
2010 static int
2011 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2012 {
2013         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2014         u64 mask;
2015
2016         mask = NGBE_ICR_MASK;
2017         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2018         intr->mask |= mask;
2019
2020         return 0;
2021 }
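/*
 * Interrupt mask layout implied by the two setup helpers above: vector bit
 * NGBE_MISC_VEC_ID carries the miscellaneous causes, while the queue vectors
 * occupy the bits from NGBE_RX_VEC_START upward, which is why the Rx-queue
 * setup clears the low ((1ULL << NGBE_RX_VEC_START) - 1) bits before OR-ing
 * the result into intr->mask.
 */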
2022
2023 /**
2024  * It clears the interrupt causes and enables the interrupt.
2025  * It will be called only once during NIC initialization.
2026  *
2027  * @param dev
2028  *  Pointer to struct rte_eth_dev.
2029  *
2030  * @return
2031  *  - On success, zero.
2032  *  - On failure, a negative value.
2033  */
2034 static int
2035 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2036 {
2037         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2038
2039         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2040
2041         return 0;
2042 }
2043
2044 /*
2045  * It reads the ICR and sets a flag for link_update.
2046  *
2047  * @param dev
2048  *  Pointer to struct rte_eth_dev.
2049  *
2050  * @return
2051  *  - On success, zero.
2052  *  - On failure, a negative value.
2053  */
2054 static int
2055 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2056 {
2057         uint32_t eicr;
2058         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2059         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2060
2061         /* clear all cause mask */
2062         ngbe_disable_intr(hw);
2063
2064         /* read-on-clear nic registers here */
2065         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2066         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2067
2068         intr->flags = 0;
2069
2070         /* set flag for async link update */
2071         if (eicr & NGBE_ICRMISC_PHY)
2072                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2073
2074         if (eicr & NGBE_ICRMISC_VFMBX)
2075                 intr->flags |= NGBE_FLAG_MAILBOX;
2076
2077         if (eicr & NGBE_ICRMISC_LNKSEC)
2078                 intr->flags |= NGBE_FLAG_MACSEC;
2079
2080         if (eicr & NGBE_ICRMISC_GPIO)
2081                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2082
2083         return 0;
2084 }
2085
2086 /**
2087  * It gets and then prints the link status.
2088  *
2089  * @param dev
2090  *  Pointer to struct rte_eth_dev.
2091  */
2096 static void
2097 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2098 {
2099         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2100         struct rte_eth_link link;
2101
2102         rte_eth_linkstatus_get(dev, &link);
2103
2104         if (link.link_status == RTE_ETH_LINK_UP) {
2105                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2106                                         (int)(dev->data->port_id),
2107                                         (unsigned int)link.link_speed,
2108                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2109                                         "full-duplex" : "half-duplex");
2110         } else {
2111                 PMD_INIT_LOG(INFO, "Port %d: Link Down",
2112                                 (int)(dev->data->port_id));
2113         }
2114         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2115                                 pci_dev->addr.domain,
2116                                 pci_dev->addr.bus,
2117                                 pci_dev->addr.devid,
2118                                 pci_dev->addr.function);
2119 }
2120
2121 /*
2122  * It executes link_update after an interrupt has occurred.
2123  *
2124  * @param dev
2125  *  Pointer to struct rte_eth_dev.
2126  *
2127  * @return
2128  *  - On success, zero.
2129  *  - On failure, a negative value.
2130  */
2131 static int
2132 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2133 {
2134         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2135         int64_t timeout;
2136
2137         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2138
2139         if (intr->flags & NGBE_FLAG_MAILBOX) {
2140                 ngbe_pf_mbx_process(dev);
2141                 intr->flags &= ~NGBE_FLAG_MAILBOX;
2142         }
2143
2144         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2145                 struct rte_eth_link link;
2146
2147                 /* get the link status before link update, for later prediction */
2148                 rte_eth_linkstatus_get(dev, &link);
2149
2150                 ngbe_dev_link_update(dev, 0);
2151
2152                 /* link is likely to come up */
2153                 if (link.link_status != RTE_ETH_LINK_UP)
2154                         /* handle it 1 sec later, wait for it to be stable */
2155                         timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
2156                 /* link is likely to go down */
2157                 else
2158                         /* handle it 4 sec later, wait for it to be stable */
2159                         timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
2160
2161                 ngbe_dev_link_status_print(dev);
2162                 if (rte_eal_alarm_set(timeout * 1000,
2163                                       ngbe_dev_interrupt_delayed_handler,
2164                                       (void *)dev) < 0) {
2165                         PMD_DRV_LOG(ERR, "Error setting alarm");
2166                 } else {
2167                         /* remember original mask */
2168                         intr->mask_misc_orig = intr->mask_misc;
2169                         /* only disable lsc interrupt */
2170                         intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2171
2172                         intr->mask_orig = intr->mask;
2173                         /* only disable all misc interrupts */
2174                         intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
2175                 }
2176         }
2177
2178         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2179         ngbe_enable_intr(dev);
2180
2181         return 0;
2182 }
2183
2184 /**
2185  * Interrupt handler which shall be registered as an alarm callback for
2186  * delayed handling of a specific interrupt, waiting for the NIC state to
2187  * become stable. As the ngbe NIC interrupt state is not stable right after
2188  * the link goes down, it needs to wait 4 seconds to get a stable status.
2189  *
2190  * @param param
2191  *  The address of parameter (struct rte_eth_dev *) registered before.
2192  */
2193 static void
2194 ngbe_dev_interrupt_delayed_handler(void *param)
2195 {
2196         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2197         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2198         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2199         uint32_t eicr;
2200
2201         ngbe_disable_intr(hw);
2202
2203         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2204         if (eicr & NGBE_ICRMISC_VFMBX)
2205                 ngbe_pf_mbx_process(dev);
2206
2207         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2208                 ngbe_dev_link_update(dev, 0);
2209                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2210                 ngbe_dev_link_status_print(dev);
2211                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2212                                               NULL);
2213         }
2214
2215         if (intr->flags & NGBE_FLAG_MACSEC) {
2216                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2217                                               NULL);
2218                 intr->flags &= ~NGBE_FLAG_MACSEC;
2219         }
2220
2221         /* restore original mask */
2222         intr->mask_misc = intr->mask_misc_orig;
2223         intr->mask_misc_orig = 0;
2224         intr->mask = intr->mask_orig;
2225         intr->mask_orig = 0;
2226
2227         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2228         ngbe_enable_intr(dev);
2229 }
2230
2231 /**
2232  * Interrupt handler triggered by the NIC for handling
2233  * a specific interrupt.
2234  *
2235  * @param param
2236  *  The address of parameter (struct rte_eth_dev *) registered before.
2237  */
2238 static void
2239 ngbe_dev_interrupt_handler(void *param)
2240 {
2241         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2242
2243         ngbe_dev_interrupt_get_status(dev);
2244         ngbe_dev_interrupt_action(dev);
2245 }
2246
2247 static int
2248 ngbe_dev_led_on(struct rte_eth_dev *dev)
2249 {
2250         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2251         return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2252 }
2253
2254 static int
2255 ngbe_dev_led_off(struct rte_eth_dev *dev)
2256 {
2257         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2258         return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2259 }
2260
2261 static int
2262 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2263 {
2264         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2265         uint32_t mflcn_reg;
2266         uint32_t fccfg_reg;
2267         int rx_pause;
2268         int tx_pause;
2269
2270         fc_conf->pause_time = hw->fc.pause_time;
2271         fc_conf->high_water = hw->fc.high_water;
2272         fc_conf->low_water = hw->fc.low_water;
2273         fc_conf->send_xon = hw->fc.send_xon;
2274         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2275
2276         /*
2277          * Return rx_pause status according to actual setting of
2278          * RXFCCFG register.
2279          */
2280         mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2281         if (mflcn_reg & NGBE_RXFCCFG_FC)
2282                 rx_pause = 1;
2283         else
2284                 rx_pause = 0;
2285
2286         /*
2287          * Return tx_pause status according to actual setting of
2288          * TXFCCFG register.
2289          */
2290         fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2291         if (fccfg_reg & NGBE_TXFCCFG_FC)
2292                 tx_pause = 1;
2293         else
2294                 tx_pause = 0;
2295
2296         if (rx_pause && tx_pause)
2297                 fc_conf->mode = RTE_ETH_FC_FULL;
2298         else if (rx_pause)
2299                 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2300         else if (tx_pause)
2301                 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2302         else
2303                 fc_conf->mode = RTE_ETH_FC_NONE;
2304
2305         return 0;
2306 }
2307
2308 static int
2309 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2310 {
2311         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2312         int err;
2313         uint32_t rx_buf_size;
2314         uint32_t max_high_water;
2315         enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2316                 ngbe_fc_none,
2317                 ngbe_fc_rx_pause,
2318                 ngbe_fc_tx_pause,
2319                 ngbe_fc_full
2320         };
2321
2322         PMD_INIT_FUNC_TRACE();
2323
2324         rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2325         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2326
2327         /*
2328          * Reserve at least one Ethernet frame for the watermark;
2329          * high_water/low_water are in kilobytes for ngbe.
2330          */
2331         max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2332         if (fc_conf->high_water > max_high_water ||
2333             fc_conf->high_water < fc_conf->low_water) {
2334                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2335                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2336                 return -EINVAL;
2337         }
2338
2339         hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2340         hw->fc.pause_time     = fc_conf->pause_time;
2341         hw->fc.high_water     = fc_conf->high_water;
2342         hw->fc.low_water      = fc_conf->low_water;
2343         hw->fc.send_xon       = fc_conf->send_xon;
2344         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2345
2346         err = hw->mac.fc_enable(hw);
2347
2348         /* Not negotiated is not an error case */
2349         if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2350                 wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2351                       (fc_conf->mac_ctrl_frame_fwd
2352                        ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2353                 ngbe_flush(hw);
2354
2355                 return 0;
2356         }
2357
2358         PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2359         return -EIO;
2360 }
2361
2362 int
2363 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2364                           struct rte_eth_rss_reta_entry64 *reta_conf,
2365                           uint16_t reta_size)
2366 {
2367         uint8_t i, j, mask;
2368         uint32_t reta;
2369         uint16_t idx, shift;
2370         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2371         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2372
2373         PMD_INIT_FUNC_TRACE();
2374
2375         if (!hw->is_pf) {
2376                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2377                         "NIC.");
2378                 return -ENOTSUP;
2379         }
2380
2381         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2382                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2383                         "(%d) doesn't match the number the hardware can support "
2384                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2385                 return -EINVAL;
2386         }
2387
2388         for (i = 0; i < reta_size; i += 4) {
2389                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2390                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2391                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2392                 if (!mask)
2393                         continue;
2394
2395                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2396                 for (j = 0; j < 4; j++) {
2397                         if (RS8(mask, j, 0x1)) {
2398                                 reta  &= ~(MS32(8 * j, 0xFF));
2399                                 reta |= LS32(reta_conf[idx].reta[shift + j],
2400                                                 8 * j, 0xFF);
2401                         }
2402                 }
2403                 wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2404         }
2405         adapter->rss_reta_updated = 1;
2406
2407         return 0;
2408 }
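/*
 * RETA packing sketch: the 128-entry redirection table is spread over
 * consecutive 32-bit NGBE_REG_RSSTBL registers holding four 8-bit entries
 * each, so entry i lives in register i >> 2 at bit offset 8 * (i & 3). The
 * reta_conf[].mask nibble consumed per iteration gates updates at the same
 * 4-entry granularity.
 */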
2409
2410 int
2411 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2412                          struct rte_eth_rss_reta_entry64 *reta_conf,
2413                          uint16_t reta_size)
2414 {
2415         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2416         uint8_t i, j, mask;
2417         uint32_t reta;
2418         uint16_t idx, shift;
2419
2420         PMD_INIT_FUNC_TRACE();
2421
2422         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2423                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2424                         "(%d) doesn't match the number the hardware can support "
2425                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2426                 return -EINVAL;
2427         }
2428
2429         for (i = 0; i < reta_size; i += 4) {
2430                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2431                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2432                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2433                 if (!mask)
2434                         continue;
2435
2436                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2437                 for (j = 0; j < 4; j++) {
2438                         if (RS8(mask, j, 0x1))
2439                                 reta_conf[idx].reta[shift + j] =
2440                                         (uint16_t)RS32(reta, 8 * j, 0xFF);
2441                 }
2442         }
2443
2444         return 0;
2445 }
2446
2447 static int
2448 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2449                                 uint32_t index, uint32_t pool)
2450 {
2451         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2452         uint32_t enable_addr = 1;
2453
2454         return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2455                              pool, enable_addr);
2456 }
2457
2458 static void
2459 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2460 {
2461         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2462
2463         ngbe_clear_rar(hw, index);
2464 }
2465
2466 static int
2467 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2468 {
2469         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2470
2471         ngbe_remove_rar(dev, 0);
2472         ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2473
2474         return 0;
2475 }
2476
2477 static int
2478 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2479 {
2480         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2481         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2482         struct rte_eth_dev_data *dev_data = dev->data;
2483
2484         /* If device is started, refuse mtu that requires the support of
2485          * scattered packets when this feature has not been enabled before.
2486          */
2487         if (dev_data->dev_started && !dev_data->scattered_rx &&
2488             (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
2489              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2490                 PMD_INIT_LOG(ERR, "Stop port first.");
2491                 return -EINVAL;
2492         }
2493
2494         if (hw->mode)
2495                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2496                         NGBE_FRAME_SIZE_MAX);
2497         else
2498                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2499                         NGBE_FRMSZ_MAX(frame_size));
2500
2501         return 0;
2502 }
2503
2504 static uint32_t
2505 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2506 {
2507         uint32_t vector = 0;
2508
2509         switch (hw->mac.mc_filter_type) {
2510         case 0:   /* use bits [47:36] of the address */
2511                 vector = ((uc_addr->addr_bytes[4] >> 4) |
2512                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2513                 break;
2514         case 1:   /* use bits [46:35] of the address */
2515                 vector = ((uc_addr->addr_bytes[4] >> 3) |
2516                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2517                 break;
2518         case 2:   /* use bits [45:34] of the address */
2519                 vector = ((uc_addr->addr_bytes[4] >> 2) |
2520                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2521                 break;
2522         case 3:   /* use bits [43:32] of the address */
2523                 vector = ((uc_addr->addr_bytes[4]) |
2524                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2525                 break;
2526         default:  /* Invalid mc_filter_type */
2527                 break;
2528         }
2529
2530         /* vector can only be 12 bits or the boundary will be exceeded */
2531         vector &= 0xFFF;
2532         return vector;
2533 }
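/*
 * Worked example, assuming mc_filter_type == 0 and a hypothetical MAC
 * address ending in ...:AB:CD: addr_bytes[4] = 0xAB, addr_bytes[5] = 0xCD,
 * so vector = ((0xAB >> 4) | (0xCD << 4)) & 0xFFF = 0xCDA. The caller below
 * then splits the 12-bit vector into a table index (vector >> 5) and a bit
 * mask (1 << (vector & 0x1F)) for the NGBE_UCADDRTBL registers.
 */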
2534
2535 static int
2536 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2537                         struct rte_ether_addr *mac_addr, uint8_t on)
2538 {
2539         uint32_t vector;
2540         uint32_t uta_idx;
2541         uint32_t reg_val;
2542         uint32_t uta_mask;
2543         uint32_t psrctl;
2544
2545         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2546         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2547
2548         vector = ngbe_uta_vector(hw, mac_addr);
2549         uta_idx = (vector >> 5) & 0x7F;
2550         uta_mask = 0x1UL << (vector & 0x1F);
2551
2552         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2553                 return 0;
2554
2555         reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2556         if (on) {
2557                 uta_info->uta_in_use++;
2558                 reg_val |= uta_mask;
2559                 uta_info->uta_shadow[uta_idx] |= uta_mask;
2560         } else {
2561                 uta_info->uta_in_use--;
2562                 reg_val &= ~uta_mask;
2563                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2564         }
2565
2566         wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2567
2568         psrctl = rd32(hw, NGBE_PSRCTL);
2569         if (uta_info->uta_in_use > 0)
2570                 psrctl |= NGBE_PSRCTL_UCHFENA;
2571         else
2572                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2573
2574         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2575         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2576         wr32(hw, NGBE_PSRCTL, psrctl);
2577
2578         return 0;
2579 }
2580
2581 static int
2582 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2583 {
2584         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2585         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2586         uint32_t psrctl;
2587         int i;
2588
2589         if (on) {
2590                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2591                         uta_info->uta_shadow[i] = ~0;
2592                         wr32(hw, NGBE_UCADDRTBL(i), ~0);
2593                 }
2594         } else {
2595                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2596                         uta_info->uta_shadow[i] = 0;
2597                         wr32(hw, NGBE_UCADDRTBL(i), 0);
2598                 }
2599         }
2600
2601         psrctl = rd32(hw, NGBE_PSRCTL);
2602         if (on)
2603                 psrctl |= NGBE_PSRCTL_UCHFENA;
2604         else
2605                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2606
2607         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2608         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2609         wr32(hw, NGBE_PSRCTL, psrctl);
2610
2611         return 0;
2612 }
2613
2614 /**
2615  * Set the IVAR registers, mapping interrupt causes to vectors
2616  * @param hw
2617  *  pointer to ngbe_hw struct
2618  * @param direction
2619  *  0 for Rx, 1 for Tx, -1 for other causes
2620  * @param queue
2621  *  queue to map the corresponding interrupt to
2622  * @param msix_vector
2623  *  the vector to map to the corresponding queue
2624  */
2625 void
2626 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2627                    uint8_t queue, uint8_t msix_vector)
2628 {
2629         uint32_t tmp, idx;
2630
2631         if (direction == -1) {
2632                 /* other causes */
2633                 msix_vector |= NGBE_IVARMISC_VLD;
2634                 idx = 0;
2635                 tmp = rd32(hw, NGBE_IVARMISC);
2636                 tmp &= ~(0xFF << idx);
2637                 tmp |= (msix_vector << idx);
2638                 wr32(hw, NGBE_IVARMISC, tmp);
2639         } else {
2640                 /* rx or tx causes */
2641                 /* Workaround for lost ICR */
2642                 idx = ((16 * (queue & 1)) + (8 * direction));
2643                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2644                 tmp &= ~(0xFF << idx);
2645                 tmp |= (msix_vector << idx);
2646                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2647         }
2648 }
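/*
 * IVAR layout example, derived from the index math above: each NGBE_IVAR
 * register covers a queue pair, with one byte per (queue parity, direction)
 * slot. Mapping Rx queue 3 to vector v therefore computes
 * idx = 16 * (3 & 1) + 8 * 0 = 16 and writes v into bits 23:16 of
 * NGBE_IVAR(3 >> 1), i.e. NGBE_IVAR(1).
 */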
2649
2650 /**
2651  * Sets up the hardware to properly generate MSI-X interrupts
2652  * @param dev
2653  *  pointer to the rte_eth_dev structure
2654  */
2655 static void
2656 ngbe_configure_msix(struct rte_eth_dev *dev)
2657 {
2658         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2659         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2660         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2661         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2662         uint32_t vec = NGBE_MISC_VEC_ID;
2663         uint32_t gpie;
2664
2665         /*
2666          * Won't configure the MSI-X register if no mapping is done
2667          * between intr vector and event fd; but if MSI-X has already
2668          * been enabled, it still needs to configure auto clean,
2669          * auto mask and throttling.
2670          */
2671         gpie = rd32(hw, NGBE_GPIE);
2672         if (!rte_intr_dp_is_en(intr_handle) &&
2673             !(gpie & NGBE_GPIE_MSIX))
2674                 return;
2675
2676         if (rte_intr_allow_others(intr_handle)) {
2677                 base = NGBE_RX_VEC_START;
2678                 vec = base;
2679         }
2680
2681         /* setup GPIE for MSI-X mode */
2682         gpie = rd32(hw, NGBE_GPIE);
2683         gpie |= NGBE_GPIE_MSIX;
2684         wr32(hw, NGBE_GPIE, gpie);
2685
2686         /* Populate the IVAR table and set the ITR values to the
2687          * corresponding register.
2688          */
2689         if (rte_intr_dp_is_en(intr_handle)) {
2690                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2691                         queue_id++) {
2692                         /* by default, 1:1 mapping */
2693                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
2694                         rte_intr_vec_list_index_set(intr_handle,
2695                                                            queue_id, vec);
2696                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
2697                             - 1)
2698                                 vec++;
2699                 }
2700
2701                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2702         }
2703         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2704                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2705                         | NGBE_ITR_WRDSA);
2706 }
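/*
 * Vector assignment sketch: Rx queues are mapped 1:1 onto MSI-X vectors
 * starting at NGBE_RX_VEC_START, but once vec reaches the last event fd
 * (base + rte_intr_nb_efd_get() - 1) it stops advancing, so any remaining
 * queues share that final vector. The miscellaneous causes always land on
 * NGBE_MISC_VEC_ID, whose ITR is programmed last.
 */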
2707
2708 static u8 *
2709 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2710                         u8 **mc_addr_ptr, u32 *vmdq)
2711 {
2712         u8 *mc_addr;
2713
2714         *vmdq = 0;
2715         mc_addr = *mc_addr_ptr;
2716         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2717         return mc_addr;
2718 }
2719
2720 int
2721 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2722                           struct rte_ether_addr *mc_addr_set,
2723                           uint32_t nb_mc_addr)
2724 {
2725         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2726         u8 *mc_addr_list;
2727
2728         mc_addr_list = (u8 *)mc_addr_set;
2729         return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2730                                          ngbe_dev_addr_list_itr, TRUE);
2731 }
2732
2733 static int
2734 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2735 {
2736         int count = 0;
2737         int g_ind = 0;
2738         const struct reg_info *reg_group;
2739         const struct reg_info **reg_set = ngbe_regs_others;
2740
2741         while ((reg_group = reg_set[g_ind++]))
2742                 count += ngbe_regs_group_count(reg_group);
2743
2744         return count;
2745 }
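/*
 * Counting sketch: ngbe_regs_others is a NULL-terminated list of reg_info
 * groups, each itself terminated by an all-zero entry, and
 * ngbe_regs_group_count() presumably sums the per-entry register counts, so
 * the total matches exactly what ngbe_get_regs() emits below.
 */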
2746
2747 static int
2748 ngbe_get_regs(struct rte_eth_dev *dev,
2749               struct rte_dev_reg_info *regs)
2750 {
2751         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2752         uint32_t *data = regs->data;
2753         int g_ind = 0;
2754         int count = 0;
2755         const struct reg_info *reg_group;
2756         const struct reg_info **reg_set = ngbe_regs_others;
2757
2758         if (data == NULL) {
2759                 regs->length = ngbe_get_reg_length(dev);
2760                 regs->width = sizeof(uint32_t);
2761                 return 0;
2762         }
2763
2764         /* Support only full register dump */
2765         if (regs->length == 0 ||
2766             regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
2767                 regs->version = hw->mac.type << 24 |
2768                                 hw->revision_id << 16 |
2769                                 hw->device_id;
2770                 while ((reg_group = reg_set[g_ind++]))
2771                         count += ngbe_read_regs_group(dev, &data[count],
2772                                                       reg_group);
2773                 return 0;
2774         }
2775
2776         return -ENOTSUP;
2777 }
2778
2779 static int
2780 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
2781 {
2782         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2783
2784         /* Return unit is byte count */
2785         return hw->rom.word_size * 2;
2786 }
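/*
 * Unit sketch: hw->rom.word_size counts 16-bit EEPROM words, hence the
 * "* 2" above to report bytes, and the ">> 1" in the get/set helpers below
 * to convert the byte-based offset/length from rte_dev_eeprom_info back
 * into word indices.
 */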
2787
2788 static int
2789 ngbe_get_eeprom(struct rte_eth_dev *dev,
2790                 struct rte_dev_eeprom_info *in_eeprom)
2791 {
2792         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2793         struct ngbe_rom_info *eeprom = &hw->rom;
2794         uint16_t *data = in_eeprom->data;
2795         int first, length;
2796
2797         first = in_eeprom->offset >> 1;
2798         length = in_eeprom->length >> 1;
2799         if (first > hw->rom.word_size ||
2800             ((first + length) > hw->rom.word_size))
2801                 return -EINVAL;
2802
2803         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
2804
2805         return eeprom->readw_buffer(hw, first, length, data);
2806 }
2807
2808 static int
2809 ngbe_set_eeprom(struct rte_eth_dev *dev,
2810                 struct rte_dev_eeprom_info *in_eeprom)
2811 {
2812         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2813         struct ngbe_rom_info *eeprom = &hw->rom;
2814         uint16_t *data = in_eeprom->data;
2815         int first, length;
2816
2817         first = in_eeprom->offset >> 1;
2818         length = in_eeprom->length >> 1;
2819         if (first > hw->rom.word_size ||
2820             ((first + length) > hw->rom.word_size))
2821                 return -EINVAL;
2822
2823         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
2824
2825         return eeprom->writew_buffer(hw, first, length, data);
2826 }
2827
2828 static const struct eth_dev_ops ngbe_eth_dev_ops = {
2829         .dev_configure              = ngbe_dev_configure,
2830         .dev_infos_get              = ngbe_dev_info_get,
2831         .dev_start                  = ngbe_dev_start,
2832         .dev_stop                   = ngbe_dev_stop,
2833         .dev_close                  = ngbe_dev_close,
2834         .dev_reset                  = ngbe_dev_reset,
2835         .promiscuous_enable         = ngbe_dev_promiscuous_enable,
2836         .promiscuous_disable        = ngbe_dev_promiscuous_disable,
2837         .allmulticast_enable        = ngbe_dev_allmulticast_enable,
2838         .allmulticast_disable       = ngbe_dev_allmulticast_disable,
2839         .link_update                = ngbe_dev_link_update,
2840         .stats_get                  = ngbe_dev_stats_get,
2841         .xstats_get                 = ngbe_dev_xstats_get,
2842         .xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
2843         .stats_reset                = ngbe_dev_stats_reset,
2844         .xstats_reset               = ngbe_dev_xstats_reset,
2845         .xstats_get_names           = ngbe_dev_xstats_get_names,
2846         .xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
2847         .fw_version_get             = ngbe_fw_version_get,
2848         .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
2849         .mtu_set                    = ngbe_dev_mtu_set,
2850         .vlan_filter_set            = ngbe_vlan_filter_set,
2851         .vlan_tpid_set              = ngbe_vlan_tpid_set,
2852         .vlan_offload_set           = ngbe_vlan_offload_set,
2853         .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
2854         .rx_queue_start             = ngbe_dev_rx_queue_start,
2855         .rx_queue_stop              = ngbe_dev_rx_queue_stop,
2856         .tx_queue_start             = ngbe_dev_tx_queue_start,
2857         .tx_queue_stop              = ngbe_dev_tx_queue_stop,
2858         .rx_queue_setup             = ngbe_dev_rx_queue_setup,
2859         .rx_queue_release           = ngbe_dev_rx_queue_release,
2860         .tx_queue_setup             = ngbe_dev_tx_queue_setup,
2861         .tx_queue_release           = ngbe_dev_tx_queue_release,
2862         .dev_led_on                 = ngbe_dev_led_on,
2863         .dev_led_off                = ngbe_dev_led_off,
2864         .flow_ctrl_get              = ngbe_flow_ctrl_get,
2865         .flow_ctrl_set              = ngbe_flow_ctrl_set,
2866         .mac_addr_add               = ngbe_add_rar,
2867         .mac_addr_remove            = ngbe_remove_rar,
2868         .mac_addr_set               = ngbe_set_default_mac_addr,
2869         .uc_hash_table_set          = ngbe_uc_hash_table_set,
2870         .uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
2871         .reta_update                = ngbe_dev_rss_reta_update,
2872         .reta_query                 = ngbe_dev_rss_reta_query,
2873         .rss_hash_update            = ngbe_dev_rss_hash_update,
2874         .rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
2875         .set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
2876         .get_reg                    = ngbe_get_regs,
2877         .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
2878         .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
2879         .get_eeprom_length          = ngbe_get_eeprom_length,
2880         .get_eeprom                 = ngbe_get_eeprom,
2881         .set_eeprom                 = ngbe_set_eeprom,
2882 };
2883
2884 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
2885 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
2886 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
2887
2888 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
2889 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
2890
2891 #ifdef RTE_ETHDEV_DEBUG_RX
2892         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
2893 #endif
2894 #ifdef RTE_ETHDEV_DEBUG_TX
2895         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
2896 #endif