net/ngbe: fix Tx hang on queue disable
drivers/net/ngbe/ngbe_ethdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
        {NGBE_RST, 1, 1, "NGBE_RST"},
        {NGBE_STAT, 1, 1, "NGBE_STAT"},
        {NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
        {NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
        {NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
        {NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
        {0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
                                ngbe_regs_general,
                                ngbe_regs_nvm,
                                ngbe_regs_interrupt,
                                ngbe_regs_fctl_others,
                                ngbe_regs_rxdma,
                                ngbe_regs_rx,
                                ngbe_regs_tx,
                                ngbe_regs_wakeup,
                                ngbe_regs_mac,
                                ngbe_regs_diagnostic,
                                NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
                                        uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

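/*
 * Per-queue VLAN-strip state is tracked in a bitmap. The word width is
 * sizeof(bitmap[0]) * NBBY bits (32), so queue q maps to word q / 32 and
 * bit q % 32 within that word.
 */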
#define NGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_TXD_ALIGN,
        .nb_seg_max = NGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
        /* MNG RxTx */
        HW_XSTAT(mng_bmc2host_packets),
        HW_XSTAT(mng_host2bmc_packets),
        /* Basic RxTx */
        HW_XSTAT(rx_packets),
        HW_XSTAT(tx_packets),
        HW_XSTAT(rx_bytes),
        HW_XSTAT(tx_bytes),
        HW_XSTAT(rx_total_bytes),
        HW_XSTAT(rx_total_packets),
        HW_XSTAT(tx_total_packets),
        HW_XSTAT(rx_total_missed_packets),
        HW_XSTAT(rx_broadcast_packets),
        HW_XSTAT(rx_multicast_packets),
        HW_XSTAT(rx_management_packets),
        HW_XSTAT(tx_management_packets),
        HW_XSTAT(rx_management_dropped),

        /* Basic Error */
        HW_XSTAT(rx_crc_errors),
        HW_XSTAT(rx_illegal_byte_errors),
        HW_XSTAT(rx_error_bytes),
        HW_XSTAT(rx_mac_short_packet_dropped),
        HW_XSTAT(rx_length_errors),
        HW_XSTAT(rx_undersize_errors),
        HW_XSTAT(rx_fragment_errors),
        HW_XSTAT(rx_oversize_errors),
        HW_XSTAT(rx_jabber_errors),
        HW_XSTAT(rx_l3_l4_xsum_error),
        HW_XSTAT(mac_local_errors),
        HW_XSTAT(mac_remote_errors),

        /* MACSEC */
        HW_XSTAT(tx_macsec_pkts_untagged),
        HW_XSTAT(tx_macsec_pkts_encrypted),
        HW_XSTAT(tx_macsec_pkts_protected),
        HW_XSTAT(tx_macsec_octets_encrypted),
        HW_XSTAT(tx_macsec_octets_protected),
        HW_XSTAT(rx_macsec_pkts_untagged),
        HW_XSTAT(rx_macsec_pkts_badtag),
        HW_XSTAT(rx_macsec_pkts_nosci),
        HW_XSTAT(rx_macsec_pkts_unknownsci),
        HW_XSTAT(rx_macsec_octets_decrypted),
        HW_XSTAT(rx_macsec_octets_validated),
        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
        HW_XSTAT(rx_macsec_sc_pkts_delayed),
        HW_XSTAT(rx_macsec_sc_pkts_late),
        HW_XSTAT(rx_macsec_sa_pkts_ok),
        HW_XSTAT(rx_macsec_sa_pkts_invalid),
        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

        /* MAC RxTx */
        HW_XSTAT(rx_size_64_packets),
        HW_XSTAT(rx_size_65_to_127_packets),
        HW_XSTAT(rx_size_128_to_255_packets),
        HW_XSTAT(rx_size_256_to_511_packets),
        HW_XSTAT(rx_size_512_to_1023_packets),
        HW_XSTAT(rx_size_1024_to_max_packets),
        HW_XSTAT(tx_size_64_packets),
        HW_XSTAT(tx_size_65_to_127_packets),
        HW_XSTAT(tx_size_128_to_255_packets),
        HW_XSTAT(tx_size_256_to_511_packets),
        HW_XSTAT(tx_size_512_to_1023_packets),
        HW_XSTAT(tx_size_1024_to_max_packets),

        /* Flow Control */
        HW_XSTAT(tx_xon_packets),
        HW_XSTAT(rx_xon_packets),
        HW_XSTAT(tx_xoff_packets),
        HW_XSTAT(rx_xoff_packets),

        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
                           sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
        QP_XSTAT(rx_qp_packets),
        QP_XSTAT(tx_qp_packets),
        QP_XSTAT(rx_qp_bytes),
        QP_XSTAT(tx_qp_bytes),
        QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
                           sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = hw->mac.reset_hw(hw);

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        if (status == NGBE_ERR_SFP_NOT_PRESENT)
                status = 0;
        return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);

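        /* IENMISC enables the miscellaneous vector sources; writing the IMC
         * (interrupt mask clear) register unmasks the queue vectors recorded
         * in intr->mask.
         */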
        wr32(hw, NGBE_IENMISC, intr->mask_misc);
        wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
        ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
        ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
        uint16_t mask;

        /*
         * These locks are trickier since they are common to all ports; but
         * swfw_sync retries for long enough (1s) to be almost sure that, if
         * the lock cannot be taken, it is due to an improper hold of the
         * semaphore.
         */
        mask = NGBE_MNGSEM_SWPHY |
               NGBE_MNGSEM_SWMBX |
               NGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        int err, ret;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ngbe_eth_dev_ops;
        eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
        eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
        eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
        eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * Rx and Tx function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ngbe_tx_queue *txq;
                /* The Tx function in the primary process was set by the last
                 * Tx queue initialized; that queue may not have been
                 * initialized by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        ngbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default Tx function if we get here */
                        PMD_INIT_LOG(NOTICE,
                                "No Tx queues configured yet. Using default Tx function.");
                }

                ngbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->sub_system_id = pci_dev->id.subsystem_device_id;
        ngbe_map_device_id(hw);
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
                NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = ngbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = ngbe_fc_full;
        hw->fc.current_mode = ngbe_fc_full;
        hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
        hw->fc.low_water = NGBE_FC_XON_LOTH;
        hw->fc.high_water = NGBE_FC_XOFF_HITH;
        hw->fc.send_xon = 1;

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, NULL);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* Reset the hw statistics */
        ngbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ngbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
                        RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        /* initialize PF if max_vfs not zero */
        ret = ngbe_pf_host_init(eth_dev);
        if (ret) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                rte_free(eth_dev->data->hash_mac_addrs);
                eth_dev->data->hash_mac_addrs = NULL;
                return ret;
        }

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                        (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   ngbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        ngbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ngbe_dev_close(eth_dev);

        return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct ngbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (ethdev == NULL)
                return 0;

        return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
        .id_table = pci_id_ngbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_ngbe_pci_probe,
        .remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

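        /* The VFTA is an array of 128 32-bit registers covering all 4096
         * VLAN IDs: bits [11:5] of the VLAN ID select the register and bits
         * [4:0] select the bit within it.
         */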
        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
        vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_rx_queue *rxq;
        bool restart;
        uint32_t rxcfg, rxbal, rxbah;

        if (on)
                ngbe_vlan_hw_strip_enable(dev, queue);
        else
                ngbe_vlan_hw_strip_disable(dev, queue);

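        /* The strip bit in RXCFG can only be changed safely while the ring
         * is disabled. If the queue is running and the bit must change, save
         * the ring base registers, stop the queue, rewrite the config with
         * the ring disabled, and restart it.
         */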
        rxq = dev->data->rx_queues[queue];
        rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        !(rxcfg & NGBE_RXCFG_VLAN);
                rxcfg |= NGBE_RXCFG_VLAN;
        } else {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        (rxcfg & NGBE_RXCFG_VLAN);
                rxcfg &= ~NGBE_RXCFG_VLAN;
        }
        rxcfg &= ~NGBE_RXCFG_ENA;

        if (restart) {
                /* set vlan strip for ring */
                ngbe_dev_rx_queue_stop(dev, queue);
                wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
                wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
                ngbe_dev_rx_queue_start(dev, queue);
        }
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type,
                    uint16_t tpid)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        int ret = 0;
        uint32_t portctrl, vlan_ext, qinq;

        portctrl = rd32(hw, NGBE_PORTCTL);

        vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
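        /* With extended VLAN, the inner TPID is programmed via
         * VLANCTL/DMATXCTRL and the outer one via EXTAG; in QinQ mode the
         * inner and outer TPIDs are additionally packed into the low and
         * high halves of TAGTPID(0).
         */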
        switch (vlan_type) {
        case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                } else {
                        ret = -ENOTSUP;
                        PMD_DRV_LOG(ERR,
                                "Inner type is not supported by single VLAN");
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_LSB_MASK,
                                NGBE_TAGTPID_LSB(tpid));
                }
                break;
        case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16 bits are valid */
                        wr32m(hw, NGBE_EXTAG,
                                NGBE_EXTAG_VLAN_MASK,
                                NGBE_EXTAG_VLAN(tpid));
                } else {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_MSB_MASK,
                                NGBE_TAGTPID_MSB(tpid));
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                return -EINVAL;
        }

        return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vlnctrl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_CFIENA;
        vlnctrl |= NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < NGBE_VFTA_SIZE; i++)
                wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
        struct ngbe_rx_queue *rxq;

        if (queue >= NGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
                NGBE_SET_HWSTRIP(hwstrip, queue);
        else
                NGBE_CLEAR_HWSTRIP(hwstrip, queue);

        if (queue >= dev->data->nb_rx_queues)
                return;

        rxq = dev->data->rx_queues[queue];

        if (on) {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl &= ~NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this per-queue HW strip setting */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl |= NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this per-queue HW strip setting */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_VLANEXT;
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
        struct ngbe_rx_queue *rxq;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ngbe_vlan_hw_strip_enable(dev, i);
                else
                        ngbe_vlan_hw_strip_disable(dev, i);
        }
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
        uint16_t i;
        struct rte_eth_rxmode *rxmode;
        struct ngbe_rx_queue *rxq;

        if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;

        if (mask & RTE_ETH_VLAN_STRIP_MASK)
                ngbe_vlan_hw_strip_config(dev);

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ngbe_vlan_hw_filter_enable(dev);
                else
                        ngbe_vlan_hw_filter_disable(dev);
        }

        if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        ngbe_vlan_hw_extend_enable(dev);
                else
                        ngbe_vlan_hw_extend_disable(dev);
        }

        if (mask & RTE_ETH_QINQ_STRIP_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
                        ngbe_qinq_hw_strip_enable(dev);
                else
                        ngbe_qinq_hw_strip_disable(dev);
        }

        return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        ngbe_config_vlan_strip_on_all_queues(dev, mask);

        ngbe_vlan_offload_config(dev, mask);

        return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

        /* set flag to update link status after init */
        intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue fails to meet the bulk
         * allocation preconditions, this flag will be reset.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

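        /* Route PHY events through the GPIO block: set the pin direction,
         * enable the GPIO interrupts and use level triggering; the active
         * polarity differs for the yt8521s SFI PHY.
         */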
        wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
        wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
        wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
        if (hw->phy.type == ngbe_phy_yt8521s_sfi)
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
        else
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

        intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
        uint32_t *link_speeds;

        PMD_INIT_FUNC_TRACE();

        /* Stop the link setup handler before resetting the HW. */
        rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        /* stop adapter */
        hw->adapter_stopped = 0;

        /* reinitialize adapter, this calls reset and start */
        hw->nb_rx_queues = dev->data->nb_rx_queues;
        hw->nb_tx_queues = dev->data->nb_tx_queues;
        status = ngbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;

        ngbe_set_pcie_master(hw, true);

        /* configure PF module if SRIOV enabled */
        ngbe_pf_host_configure(dev);

        ngbe_dev_phy_intr_setup(dev);

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle)) {
                if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                                   dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate %d rx_queues intr_vec",
                                     dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure MSI-X for sleep until Rx interrupt */
        ngbe_configure_msix(dev);

        /* initialize transmission unit */
        ngbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ngbe_dev_rx_init(dev);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
                goto error;
        }

        mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
                RTE_ETH_VLAN_EXTEND_MASK;
        err = ngbe_vlan_offload_config(dev, mask);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }

        hw->mac.setup_pba(hw);
        ngbe_configure_port(dev);

        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }

        /* Skip link setup if loopback mode is enabled. */
        if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
                goto skip_link_setup;

        err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err != 0)
                goto error;
        dev->data->dev_link.link_status = link_up;

        link_speeds = &dev->data->dev_conf.link_speeds;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
                negotiate = true;

        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
        if (err != 0)
                goto error;

        allowed_speeds = 0;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

        if (*link_speeds & ~allowed_speeds) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }

        speed = 0x0;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = hw->mac.default_speeds;
        } else {
                if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= NGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= NGBE_LINK_SPEED_100M_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
                        speed |= NGBE_LINK_SPEED_10M_FULL;
        }

        hw->phy.init_hw(hw);
        err = hw->mac.setup_link(hw, speed, link_up);
        if (err != 0)
                goto error;

skip_link_setup:

        if (rte_intr_allow_others(intr_handle)) {
                ngbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        ngbe_dev_lsc_interrupt_setup(dev, TRUE);
                else
                        ngbe_dev_lsc_interrupt_setup(dev, FALSE);
                ngbe_dev_macsec_interrupt_setup(dev);
                ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             ngbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO,
                                     "LSC won't enable because of no intr multiplex");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
                ngbe_dev_rxq_interrupt_setup(dev);

        /* enable UIO/VFIO intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* resume enabled intr since HW reset */
        ngbe_enable_intr(dev);

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* GPIO 0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, 0);
        }

        /*
         * Update link status right before returning, because it may
         * start the link configuration process in a separate thread.
         */
        ngbe_dev_link_update(dev, 0);

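        /* Read the counters once to establish a baseline; once offset_loaded
         * is set, the per-queue update macros report deltas against it.
         */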
        ngbe_read_stats_registers(hw, hw_stats);
        hw->offset_loaded = 1;

        return 0;

error:
        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
        ngbe_dev_clear_queues(dev);
        return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int vf;

        if (hw->adapter_stopped)
                return 0;

        PMD_INIT_FUNC_TRACE();

        rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* GPIO 0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
        }

        /* disable interrupts */
        ngbe_disable_intr(hw);

        /* reset the NIC */
        ngbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;

        /* stop adapter */
        ngbe_stop_hw(hw);

        for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
                vfinfo[vf].clear_to_send = false;

        ngbe_dev_clear_queues(dev);

        /* Clear stored conf */
        dev->data->scattered_rx = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* revert to the default handler */
                rte_intr_callback_register(intr_handle,
                                           ngbe_dev_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_intr_vec_list_free(intr_handle);

        ngbe_set_pcie_master(hw, true);

        adapter->rss_reta_updated = 0;

        hw->adapter_stopped = true;
        dev->data->dev_started = 0;

        return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        ngbe_pf_reset_hw(hw);

        ngbe_dev_stop(dev);

        ngbe_dev_free_queues(dev);

        ngbe_set_pcie_master(hw, false);

        /* reprogram the RAR[0] in case user changed it. */
        ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

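        /* Unregistering returns -EAGAIN while the callback is still
         * executing, so retry with a delay for a bounded amount of time.
         */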
        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                ngbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + NGBE_LINK_UP_TIME));

        /* uninitialize PF if max_vfs not zero */
        ngbe_pf_host_uninit(dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        /* When a DPDK PMD PF begins to reset a PF port, it should notify all
         * its VFs so that they stay aligned with it. The detailed
         * notification mechanism is PMD specific, and for the ngbe PF it is
         * rather complex. To avoid unexpected behavior in the VFs, resetting
         * the PF with SR-IOV active is currently not supported. It may be
         * supported later.
         */
        if (dev->data->sriov.active)
                return -ENOTSUP;

        ret = eth_ngbe_dev_uninit(dev);
        if (ret != 0)
                return ret;

        ret = eth_ngbe_dev_init(dev, NULL);

        return ret;
}

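/*
 * The hardware QP counters are 32-bit (or 36-bit, split across two
 * registers) and wrap. These helpers tolerate one wrap between reads by
 * adding 2^32 (or 2^36) when the current value is below the last one, and
 * report the delta since the baseline captured while offset_loaded was 0.
 */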
1270 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1271         {                                                       \
1272                 uint32_t current_counter = rd32(hw, reg);       \
1273                 if (current_counter < last_counter)             \
1274                         current_counter += 0x100000000LL;       \
1275                 if (!hw->offset_loaded)                         \
1276                         last_counter = current_counter;         \
1277                 counter = current_counter - last_counter;       \
1278                 counter &= 0xFFFFFFFFLL;                        \
1279         }
1280
1281 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1282         {                                                                \
1283                 uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1284                 uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1285                 uint64_t current_counter = (current_counter_msb << 32) | \
1286                         current_counter_lsb;                             \
1287                 if (current_counter < last_counter)                      \
1288                         current_counter += 0x1000000000LL;               \
1289                 if (!hw->offset_loaded)                                  \
1290                         last_counter = current_counter;                  \
1291                 counter = current_counter - last_counter;                \
1292                 counter &= 0xFFFFFFFFFLL;                                \
1293         }
1294
1295 void
1296 ngbe_read_stats_registers(struct ngbe_hw *hw,
1297                            struct ngbe_hw_stats *hw_stats)
1298 {
1299         unsigned int i;
1300
1301         /* QP Stats */
1302         for (i = 0; i < hw->nb_rx_queues; i++) {
1303                 UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1304                         hw->qp_last[i].rx_qp_packets,
1305                         hw_stats->qp[i].rx_qp_packets);
1306                 UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1307                         hw->qp_last[i].rx_qp_bytes,
1308                         hw_stats->qp[i].rx_qp_bytes);
1309                 UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1310                         hw->qp_last[i].rx_qp_mc_packets,
1311                         hw_stats->qp[i].rx_qp_mc_packets);
1312                 UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1313                         hw->qp_last[i].rx_qp_bc_packets,
1314                         hw_stats->qp[i].rx_qp_bc_packets);
1315         }
1316
1317         for (i = 0; i < hw->nb_tx_queues; i++) {
1318                 UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1319                         hw->qp_last[i].tx_qp_packets,
1320                         hw_stats->qp[i].tx_qp_packets);
1321                 UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1322                         hw->qp_last[i].tx_qp_bytes,
1323                         hw_stats->qp[i].tx_qp_bytes);
1324                 UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1325                         hw->qp_last[i].tx_qp_mc_packets,
1326                         hw_stats->qp[i].tx_qp_mc_packets);
1327                 UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1328                         hw->qp_last[i].tx_qp_bc_packets,
1329                         hw_stats->qp[i].tx_qp_bc_packets);
1330         }
1331
1332         /* PB Stats */
1333         hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1334         hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1335         hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1336         hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1337         hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1338         hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1339
1340         hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1341         hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1342
1343         /* DMA Stats */
1344         hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
1345         hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
1346         hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1347         hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1348         hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1349         hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1350         hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1351         hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1352
1353         /* MAC Stats */
1354         hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1355         hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1356         hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1357
1358         hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1359         hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1360         hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
1361
1362         hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1363         hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1364
1365         hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1366         hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1367         hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1368         hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1369         hw_stats->rx_size_512_to_1023_packets +=
1370                         rd64(hw, NGBE_MACRX512TO1023L);
1371         hw_stats->rx_size_1024_to_max_packets +=
1372                         rd64(hw, NGBE_MACRX1024TOMAXL);
1373         hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1374         hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1375         hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1376         hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1377         hw_stats->tx_size_512_to_1023_packets +=
1378                         rd64(hw, NGBE_MACTX512TO1023L);
1379         hw_stats->tx_size_1024_to_max_packets +=
1380                         rd64(hw, NGBE_MACTX1024TOMAXL);
1381
1382         hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1383         hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
1384         hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
1385
1386         /* MNG Stats */
1387         hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1388         hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1389         hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1390         hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1391
1392         /* MACsec Stats */
1393         hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1394         hw_stats->tx_macsec_pkts_encrypted +=
1395                         rd32(hw, NGBE_LSECTX_ENCPKT);
1396         hw_stats->tx_macsec_pkts_protected +=
1397                         rd32(hw, NGBE_LSECTX_PROTPKT);
1398         hw_stats->tx_macsec_octets_encrypted +=
1399                         rd32(hw, NGBE_LSECTX_ENCOCT);
1400         hw_stats->tx_macsec_octets_protected +=
1401                         rd32(hw, NGBE_LSECTX_PROTOCT);
1402         hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1403         hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1404         hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1405         hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1406         hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1407         hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1408         hw_stats->rx_macsec_sc_pkts_unchecked +=
1409                         rd32(hw, NGBE_LSECRX_UNCHKPKT);
1410         hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1411         hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1412         for (i = 0; i < 2; i++) {
1413                 hw_stats->rx_macsec_sa_pkts_ok +=
1414                         rd32(hw, NGBE_LSECRX_OKPKT(i));
1415                 hw_stats->rx_macsec_sa_pkts_invalid +=
1416                         rd32(hw, NGBE_LSECRX_INVPKT(i));
1417                 hw_stats->rx_macsec_sa_pkts_notvalid +=
1418                         rd32(hw, NGBE_LSECRX_BADPKT(i));
1419         }
1420         for (i = 0; i < 4; i++) {
1421                 hw_stats->rx_macsec_sa_pkts_unusedsa +=
1422                         rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1423                 hw_stats->rx_macsec_sa_pkts_notusingsa +=
1424                         rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1425         }
1426         hw_stats->rx_total_missed_packets =
1427                         hw_stats->rx_up_dropped;
1428 }
1429
1430 static int
1431 ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1432 {
1433         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1434         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1435         struct ngbe_stat_mappings *stat_mappings =
1436                         NGBE_DEV_STAT_MAPPINGS(dev);
1437         uint32_t i, j;
1438
1439         ngbe_read_stats_registers(hw, hw_stats);
1440
1441         if (stats == NULL)
1442                 return -EINVAL;
1443
1444         /* Fill out the rte_eth_stats statistics structure */
1445         stats->ipackets = hw_stats->rx_packets;
1446         stats->ibytes = hw_stats->rx_bytes;
1447         stats->opackets = hw_stats->tx_packets;
1448         stats->obytes = hw_stats->tx_bytes;
1449
1450         memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1451         memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1452         memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1453         memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1454         memset(&stats->q_errors, 0, sizeof(stats->q_errors));
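             /*
              * Each 32-bit RQSM/TQSM register packs four 8-bit map fields
              * (NB_QMAP_FIELDS_PER_QSM_REG); q_map picks which of the
              * RTE_ETHDEV_QUEUE_STAT_CNTRS buckets accumulates the
              * counters of queue i.
              */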
1455         for (i = 0; i < NGBE_MAX_QP; i++) {
1456                 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1457                 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1458                 uint32_t q_map;
1459
1460                 q_map = (stat_mappings->rqsm[n] >> offset)
1461                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1462                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1463                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1464                 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1465                 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1466
1467                 q_map = (stat_mappings->tqsm[n] >> offset)
1468                                 & QMAP_FIELD_RESERVED_BITS_MASK;
1469                 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1470                      ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1471                 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1472                 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1473         }
1474
1475         /* Rx Errors */
1476         stats->imissed  = hw_stats->rx_total_missed_packets +
1477                           hw_stats->rx_dma_drop;
1478         stats->ierrors  = hw_stats->rx_crc_errors +
1479                           hw_stats->rx_mac_short_packet_dropped +
1480                           hw_stats->rx_length_errors +
1481                           hw_stats->rx_undersize_errors +
1482                           hw_stats->rx_oversize_errors +
1483                           hw_stats->rx_illegal_byte_errors +
1484                           hw_stats->rx_error_bytes +
1485                           hw_stats->rx_fragment_errors;
1486
1487         /* Tx Errors */
1488         stats->oerrors  = 0;
1489         return 0;
1490 }
1491
1492 static int
1493 ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1494 {
1495         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1496         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1497
1498         /* HW registers are cleared on read */
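             /* Passing stats == NULL below skips the copy-out but still
              * performs the register read, flushing the read-on-clear
              * counters.
              */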
1499         hw->offset_loaded = 0;
1500         ngbe_dev_stats_get(dev, NULL);
1501         hw->offset_loaded = 1;
1502
1503         /* Reset software totals */
1504         memset(hw_stats, 0, sizeof(*hw_stats));
1505
1506         return 0;
1507 }
1508
1509 /* This function calculates the number of xstats based on the current config */
1510 static unsigned
1511 ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1512 {
1513         int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1514         return NGBE_NB_HW_STATS +
1515                NGBE_NB_QP_STATS * nb_queues;
1516 }
1517
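     /*
      * The xstats id space is flat: ids [0, NGBE_NB_HW_STATS) index the
      * device-wide counters in rte_ngbe_stats_strings; the following
      * NGBE_NB_QP_STATS * NGBE_MAX_QP ids cover the per-queue counters,
      * grouped queue by queue (illustrative example: if NGBE_NB_QP_STATS
      * were 4, id NGBE_NB_HW_STATS + 5 would be the second counter of
      * queue 1).
      */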
1518 static inline int
1519 ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1520 {
1521         int nb, st;
1522
1523         /* Extended stats from ngbe_hw_stats */
1524         if (id < NGBE_NB_HW_STATS) {
1525                 snprintf(name, size, "[hw]%s",
1526                         rte_ngbe_stats_strings[id].name);
1527                 return 0;
1528         }
1529         id -= NGBE_NB_HW_STATS;
1530
1531         /* Queue Stats */
1532         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1533                 nb = id / NGBE_NB_QP_STATS;
1534                 st = id % NGBE_NB_QP_STATS;
1535                 snprintf(name, size, "[q%u]%s", nb,
1536                         rte_ngbe_qp_strings[st].name);
1537                 return 0;
1538         }
1539         id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1540
1541         return -(int)(id + 1);
1542 }
1543
1544 static inline int
1545 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1546 {
1547         int nb, st;
1548
1549         /* Extended stats from ngbe_hw_stats */
1550         if (id < NGBE_NB_HW_STATS) {
1551                 *offset = rte_ngbe_stats_strings[id].offset;
1552                 return 0;
1553         }
1554         id -= NGBE_NB_HW_STATS;
1555
1556         /* Queue Stats */
1557         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1558                 nb = id / NGBE_NB_QP_STATS;
1559                 st = id % NGBE_NB_QP_STATS;
1560                 *offset = rte_ngbe_qp_strings[st].offset +
1561                         nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1562                 return 0;
1563         }
1564
1565         return -1;
1566 }
1567
1568 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1569         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1570 {
1571         unsigned int i, count;
1572
1573         count = ngbe_xstats_calc_num(dev);
1574         if (xstats_names == NULL)
1575                 return count;
1576
1577         /* Note: limit >= cnt_stats checked upstream
1578          * in rte_eth_xstats_get_names()
1579          */
1580         limit = min(limit, count);
1581
1582         /* Extended stats from ngbe_hw_stats */
1583         for (i = 0; i < limit; i++) {
1584                 if (ngbe_get_name_by_id(i, xstats_names[i].name,
1585                         sizeof(xstats_names[i].name))) {
1586                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1587                         break;
1588                 }
1589         }
1590
1591         return i;
1592 }
1593
1594 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1595         const uint64_t *ids,
1596         struct rte_eth_xstat_name *xstats_names,
1597         unsigned int limit)
1598 {
1599         unsigned int i;
1600
1601         if (ids == NULL)
1602                 return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1603
1604         for (i = 0; i < limit; i++) {
1605                 if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1606                                 sizeof(xstats_names[i].name))) {
1607                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1608                         return -1;
1609                 }
1610         }
1611
1612         return i;
1613 }
1614
1615 static int
1616 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1617                                          unsigned int limit)
1618 {
1619         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1620         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1621         unsigned int i, count;
1622
1623         ngbe_read_stats_registers(hw, hw_stats);
1624
1625         /* If this is a reset, xstats is NULL and the registers have
1626          * already been cleared by the read above.
1627          */
1628         count = ngbe_xstats_calc_num(dev);
1629         if (xstats == NULL)
1630                 return count;
1631
1632         limit = min(limit, ngbe_xstats_calc_num(dev));
1633
1634         /* Extended stats from ngbe_hw_stats */
1635         for (i = 0; i < limit; i++) {
1636                 uint32_t offset = 0;
1637
1638                 if (ngbe_get_offset_by_id(i, &offset)) {
1639                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1640                         break;
1641                 }
1642                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1643                 xstats[i].id = i;
1644         }
1645
1646         return i;
1647 }
1648
1649 static int
1650 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1651                                          unsigned int limit)
1652 {
1653         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1654         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1655         unsigned int i, count;
1656
1657         ngbe_read_stats_registers(hw, hw_stats);
1658
1659         /* If this is a reset, values is NULL and the registers have
1660          * already been cleared by the read above.
1661          */
1662         count = ngbe_xstats_calc_num(dev);
1663         if (values == NULL)
1664                 return count;
1665
1666         limit = min(limit, ngbe_xstats_calc_num(dev));
1667
1668         /* Extended stats from ngbe_hw_stats */
1669         for (i = 0; i < limit; i++) {
1670                 uint32_t offset;
1671
1672                 if (ngbe_get_offset_by_id(i, &offset)) {
1673                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1674                         break;
1675                 }
1676                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1677         }
1678
1679         return i;
1680 }
1681
1682 static int
1683 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1684                 uint64_t *values, unsigned int limit)
1685 {
1686         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1687         unsigned int i;
1688
1689         if (ids == NULL)
1690                 return ngbe_dev_xstats_get_(dev, values, limit);
1691
1692         for (i = 0; i < limit; i++) {
1693                 uint32_t offset;
1694
1695                 if (ngbe_get_offset_by_id(ids[i], &offset)) {
1696                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1697                         break;
1698                 }
1699                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1700         }
1701
1702         return i;
1703 }
1704
1705 static int
1706 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1707 {
1708         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1709         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1710
1711         /* HW registers are cleared on read */
1712         hw->offset_loaded = 0;
1713         ngbe_read_stats_registers(hw, hw_stats);
1714         hw->offset_loaded = 1;
1715
1716         /* Reset software totals */
1717         memset(hw_stats, 0, sizeof(*hw_stats));
1718
1719         return 0;
1720 }
1721
1722 static int
1723 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1724 {
1725         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1726         int ret;
1727
1728         ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1729
1730         if (ret < 0)
1731                 return -EINVAL;
1732
1733         ret += 1; /* add the size of '\0' */
1734         if (fw_size < (size_t)ret)
1735                 return ret;
1736
1737         return 0;
1738 }
1739
1740 static int
1741 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1742 {
1743         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1744         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1745
1746         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1747         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1748         dev_info->min_rx_bufsize = 1024;
1749         dev_info->max_rx_pktlen = 15872;
1750         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1751         dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1752         dev_info->max_vfs = pci_dev->max_vfs;
1753         dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1754         dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1755                                      dev_info->rx_queue_offload_capa);
1756         dev_info->tx_queue_offload_capa = 0;
1757         dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1758
1759         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1760                 .rx_thresh = {
1761                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1762                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1763                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1764                 },
1765                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1766                 .rx_drop_en = 0,
1767                 .offloads = 0,
1768         };
1769
1770         dev_info->default_txconf = (struct rte_eth_txconf) {
1771                 .tx_thresh = {
1772                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1773                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1774                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1775                 },
1776                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1777                 .offloads = 0,
1778         };
1779
1780         dev_info->rx_desc_lim = rx_desc_lim;
1781         dev_info->tx_desc_lim = tx_desc_lim;
1782
1783         dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1784         dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1785         dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1786
1787         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1788                                 RTE_ETH_LINK_SPEED_10M;
1789
1790         /* Driver-preferred Rx/Tx parameters */
1791         dev_info->default_rxportconf.burst_size = 32;
1792         dev_info->default_txportconf.burst_size = 32;
1793         dev_info->default_rxportconf.nb_queues = 1;
1794         dev_info->default_txportconf.nb_queues = 1;
1795         dev_info->default_rxportconf.ring_size = 256;
1796         dev_info->default_txportconf.ring_size = 256;
1797
1798         return 0;
1799 }
1800
1801 const uint32_t *
1802 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1803 {
1804         if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1805             dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1806             dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1807             dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1808                 return ngbe_get_supported_ptypes();
1809
1810         return NULL;
1811 }
1812
1813 void
1814 ngbe_dev_setup_link_alarm_handler(void *param)
1815 {
1816         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1817         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1818         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1819         u32 speed;
1820         bool autoneg = false;
1821
1822         speed = hw->phy.autoneg_advertised;
1823         if (!speed)
1824                 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
1825
1826         hw->mac.setup_link(hw, speed, true);
1827
1828         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1829 }
1830
1831 /* return 0 means link status changed, -1 means not changed */
1832 int
1833 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1834                             int wait_to_complete)
1835 {
1836         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1837         struct rte_eth_link link;
1838         u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1839         u32 lan_speed = 0;
1840         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1841         bool link_up;
1842         int err;
1843         int wait = 1;
1844
1845         memset(&link, 0, sizeof(link));
1846         link.link_status = RTE_ETH_LINK_DOWN;
1847         link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1848         link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
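             /* Report autoneg unless a fixed speed was requested (any bit
              * set besides RTE_ETH_LINK_SPEED_AUTONEG).
              */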
1849         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1850                         ~RTE_ETH_LINK_SPEED_AUTONEG);
1851
1852         hw->mac.get_link_status = true;
1853
1854         if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1855                 return rte_eth_linkstatus_set(dev, &link);
1856
1857         /* no need to wait for completion if polling was not requested or the LSC interrupt is enabled */
1858         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1859                 wait = 0;
1860
1861         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1862         if (err != 0) {
1863                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1864                 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1865                 return rte_eth_linkstatus_set(dev, &link);
1866         }
1867
1868         if (!link_up) {
1869                 if (hw->phy.media_type == ngbe_media_type_fiber &&
1870                         hw->phy.type != ngbe_phy_mvl_sfi) {
1871                         intr->flags |= NGBE_FLAG_NEED_LINK_CONFIG;
1872                         rte_eal_alarm_set(10,
1873                                 ngbe_dev_setup_link_alarm_handler, dev);
1874                 }
1875
1876                 return rte_eth_linkstatus_set(dev, &link);
1877         }
1878
1879         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1880         link.link_status = RTE_ETH_LINK_UP;
1881         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1882
1883         switch (link_speed) {
1884         default:
1885         case NGBE_LINK_SPEED_UNKNOWN:
1886                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1887                 break;
1888
1889         case NGBE_LINK_SPEED_10M_FULL:
1890                 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1891                 lan_speed = 0;
1892                 break;
1893
1894         case NGBE_LINK_SPEED_100M_FULL:
1895                 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1896                 lan_speed = 1;
1897                 break;
1898
1899         case NGBE_LINK_SPEED_1GB_FULL:
1900                 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1901                 lan_speed = 2;
1902                 break;
1903         }
1904
1905         if (hw->is_pf) {
1906                 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1907                 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1908                                 NGBE_LINK_SPEED_100M_FULL |
1909                                 NGBE_LINK_SPEED_10M_FULL)) {
1910                         wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1911                                 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1912                 }
1913         }
1914
1915         return rte_eth_linkstatus_set(dev, &link);
1916 }
1917
1918 static int
1919 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1920 {
1921         return ngbe_dev_link_update_share(dev, wait_to_complete);
1922 }
1923
1924 static int
1925 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1926 {
1927         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1928         uint32_t fctrl;
1929
1930         fctrl = rd32(hw, NGBE_PSRCTL);
1931         fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1932         wr32(hw, NGBE_PSRCTL, fctrl);
1933
1934         return 0;
1935 }
1936
1937 static int
1938 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1939 {
1940         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1941         uint32_t fctrl;
1942
1943         fctrl = rd32(hw, NGBE_PSRCTL);
1944         fctrl &= (~NGBE_PSRCTL_UCP);
1945         if (dev->data->all_multicast == 1)
1946                 fctrl |= NGBE_PSRCTL_MCP;
1947         else
1948                 fctrl &= (~NGBE_PSRCTL_MCP);
1949         wr32(hw, NGBE_PSRCTL, fctrl);
1950
1951         return 0;
1952 }
1953
1954 static int
1955 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1956 {
1957         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1958         uint32_t fctrl;
1959
1960         fctrl = rd32(hw, NGBE_PSRCTL);
1961         fctrl |= NGBE_PSRCTL_MCP;
1962         wr32(hw, NGBE_PSRCTL, fctrl);
1963
1964         return 0;
1965 }
1966
1967 static int
1968 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1969 {
1970         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1971         uint32_t fctrl;
1972
1973         if (dev->data->promiscuous == 1)
1974                 return 0; /* must remain in all_multicast mode */
1975
1976         fctrl = rd32(hw, NGBE_PSRCTL);
1977         fctrl &= (~NGBE_PSRCTL_MCP);
1978         wr32(hw, NGBE_PSRCTL, fctrl);
1979
1980         return 0;
1981 }
1982
1983 /**
1984  * It clears the interrupt causes and enables the interrupt.
1985  * It will be called only once during NIC initialization.
1986  *
1987  * @param dev
1988  *  Pointer to struct rte_eth_dev.
1989  * @param on
1990  *  Enable or Disable.
1991  *
1992  * @return
1993  *  - On success, zero.
1994  *  - On failure, a negative value.
1995  */
1996 static int
1997 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1998 {
1999         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2000
2001         ngbe_dev_link_status_print(dev);
2002         if (on != 0) {
2003                 intr->mask_misc |= NGBE_ICRMISC_PHY;
2004                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
2005         } else {
2006                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2007                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2008         }
2009
2010         return 0;
2011 }
2012
2013 /**
2014  * It clears the interrupt causes and enables the interrupt.
2015  * It will be called only once during NIC initialization.
2016  *
2017  * @param dev
2018  *  Pointer to struct rte_eth_dev.
2019  *
2020  * @return
2021  *  - On success, zero.
2022  *  - On failure, a negative value.
2023  */
2024 static int
2025 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2026 {
2027         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2028         u64 mask;
2029
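             /* From the full cause mask, keep only the bit belonging to
              * the misc vector (NGBE_MISC_VEC_ID).
              */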
2030         mask = NGBE_ICR_MASK;
2031         mask &= (1ULL << NGBE_MISC_VEC_ID);
2032         intr->mask |= mask;
2033         intr->mask_misc |= NGBE_ICRMISC_GPIO;
2034
2035         return 0;
2036 }
2037
2038 /**
2039  * It clears the interrupt causes and enables the interrupt.
2040  * It will be called only once during NIC initialization.
2041  *
2042  * @param dev
2043  *  Pointer to struct rte_eth_dev.
2044  *
2045  * @return
2046  *  - On success, zero.
2047  *  - On failure, a negative value.
2048  */
2049 static int
2050 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2051 {
2052         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2053         u64 mask;
2054
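             /* Clear the bits below the first Rx vector so that only the
              * queue vectors stay enabled in the mask.
              */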
2055         mask = NGBE_ICR_MASK;
2056         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2057         intr->mask |= mask;
2058
2059         return 0;
2060 }
2061
2062 /**
2063  * It clears the interrupt causes and enables the interrupt.
2064  * It will be called only once during NIC initialization.
2065  *
2066  * @param dev
2067  *  Pointer to struct rte_eth_dev.
2068  *
2069  * @return
2070  *  - On success, zero.
2071  *  - On failure, a negative value.
2072  */
2073 static int
2074 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2075 {
2076         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2077
2078         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2079
2080         return 0;
2081 }
2082
2083 /*
2084  * It reads the ICR and sets a flag for the link_update.
2085  *
2086  * @param dev
2087  *  Pointer to struct rte_eth_dev.
2088  *
2089  * @return
2090  *  - On success, zero.
2091  *  - On failure, a negative value.
2092  */
2093 static int
2094 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2095 {
2096         uint32_t eicr;
2097         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2098         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2099
2100         /* read-on-clear nic registers here */
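             /* The misc cause is snapshotted from the in-memory interrupt
              * status block (isb_mem) rather than from an ICR register
              * read; the slot is cleared again by the write below.
              */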
2101         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2102         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2103
2104         intr->flags = 0;
2105
2106         /* set flag for async link update */
2107         if (eicr & NGBE_ICRMISC_PHY)
2108                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2109
2110         if (eicr & NGBE_ICRMISC_VFMBX)
2111                 intr->flags |= NGBE_FLAG_MAILBOX;
2112
2113         if (eicr & NGBE_ICRMISC_LNKSEC)
2114                 intr->flags |= NGBE_FLAG_MACSEC;
2115
2116         if (eicr & NGBE_ICRMISC_GPIO)
2117                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2118
2119         ((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2120
2121         return 0;
2122 }
2123
2124 /**
2125  * It gets and then prints the link status.
2126  *
2127  * @param dev
2128  *  Pointer to struct rte_eth_dev.
2133  */
2134 static void
2135 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2136 {
2137         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2138         struct rte_eth_link link;
2139
2140         rte_eth_linkstatus_get(dev, &link);
2141
2142         if (link.link_status == RTE_ETH_LINK_UP) {
2143                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2144                                         (int)(dev->data->port_id),
2145                                         (unsigned int)link.link_speed,
2146                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2147                                         "full-duplex" : "half-duplex");
2148         } else {
2149                 PMD_INIT_LOG(INFO, "Port %d: Link Down",
2150                                 (int)(dev->data->port_id));
2151         }
2152         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2153                                 pci_dev->addr.domain,
2154                                 pci_dev->addr.bus,
2155                                 pci_dev->addr.devid,
2156                                 pci_dev->addr.function);
2157 }
2158
2159 /*
2160  * It executes link_update after an interrupt has occurred.
2161  *
2162  * @param dev
2163  *  Pointer to struct rte_eth_dev.
2164  *
2165  * @return
2166  *  - On success, zero.
2167  *  - On failure, a negative value.
2168  */
2169 static int
2170 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2171 {
2172         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2173
2174         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2175
2176         if (intr->flags & NGBE_FLAG_MAILBOX) {
2177                 ngbe_pf_mbx_process(dev);
2178                 intr->flags &= ~NGBE_FLAG_MAILBOX;
2179         }
2180
2181         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2182                 struct rte_eth_link link;
2183
2184                 /* get the link status before link update, to compare against later */
2185                 rte_eth_linkstatus_get(dev, &link);
2186
2187                 ngbe_dev_link_update(dev, 0);
2188                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2189                 ngbe_dev_link_status_print(dev);
2190                 if (dev->data->dev_link.link_speed != link.link_speed)
2191                         rte_eth_dev_callback_process(dev,
2192                                 RTE_ETH_EVENT_INTR_LSC, NULL);
2193         }
2194
2195         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2196         ngbe_enable_intr(dev);
2197
2198         return 0;
2199 }
2200
2201 /**
2202  * Interrupt handler triggered by the NIC for handling
2203  * specific interrupts.
2204  *
2205  * @param param
2206  *  The address of parameter (struct rte_eth_dev *) registered before.
2207  */
2208 static void
2209 ngbe_dev_interrupt_handler(void *param)
2210 {
2211         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2212
2213         ngbe_dev_interrupt_get_status(dev);
2214         ngbe_dev_interrupt_action(dev);
2215 }
2216
2217 static int
2218 ngbe_dev_led_on(struct rte_eth_dev *dev)
2219 {
2220         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2221         return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2222 }
2223
2224 static int
2225 ngbe_dev_led_off(struct rte_eth_dev *dev)
2226 {
2227         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2228         return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2229 }
2230
2231 static int
2232 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2233 {
2234         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2235         uint32_t mflcn_reg;
2236         uint32_t fccfg_reg;
2237         int rx_pause;
2238         int tx_pause;
2239
2240         fc_conf->pause_time = hw->fc.pause_time;
2241         fc_conf->high_water = hw->fc.high_water;
2242         fc_conf->low_water = hw->fc.low_water;
2243         fc_conf->send_xon = hw->fc.send_xon;
2244         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2245
2246         /*
2247          * Return rx_pause status according to actual setting of
2248          * RXFCCFG register.
2249          */
2250         mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2251         if (mflcn_reg & NGBE_RXFCCFG_FC)
2252                 rx_pause = 1;
2253         else
2254                 rx_pause = 0;
2255
2256         /*
2257          * Return tx_pause status according to actual setting of
2258          * TXFCCFG register.
2259          */
2260         fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2261         if (fccfg_reg & NGBE_TXFCCFG_FC)
2262                 tx_pause = 1;
2263         else
2264                 tx_pause = 0;
2265
2266         if (rx_pause && tx_pause)
2267                 fc_conf->mode = RTE_ETH_FC_FULL;
2268         else if (rx_pause)
2269                 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2270         else if (tx_pause)
2271                 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2272         else
2273                 fc_conf->mode = RTE_ETH_FC_NONE;
2274
2275         return 0;
2276 }
2277
2278 static int
2279 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2280 {
2281         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2282         int err;
2283         uint32_t rx_buf_size;
2284         uint32_t max_high_water;
2285         enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2286                 ngbe_fc_none,
2287                 ngbe_fc_rx_pause,
2288                 ngbe_fc_tx_pause,
2289                 ngbe_fc_full
2290         };
2291
2292         PMD_INIT_FUNC_TRACE();
2293
2294         rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2295         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2296
2297         /*
2298          * Reserve at least one Ethernet frame for the watermark;
2299          * high_water/low_water are in kilobytes for ngbe.
2300          */
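             /* Illustrative arithmetic (buffer size assumed): with a 64 KB
              * Rx packet buffer, max_high_water = (65536 - 1518) >> 10
              * = 62 KB.
              */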
2301         max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2302         if (fc_conf->high_water > max_high_water ||
2303             fc_conf->high_water < fc_conf->low_water) {
2304                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2305                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2306                 return -EINVAL;
2307         }
2308
2309         hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2310         hw->fc.pause_time     = fc_conf->pause_time;
2311         hw->fc.high_water     = fc_conf->high_water;
2312         hw->fc.low_water      = fc_conf->low_water;
2313         hw->fc.send_xon       = fc_conf->send_xon;
2314         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2315
2316         err = hw->mac.fc_enable(hw);
2317
2318         /* Not negotiated is not an error case */
2319         if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2320                 wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2321                       (fc_conf->mac_ctrl_frame_fwd
2322                        ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2323                 ngbe_flush(hw);
2324
2325                 return 0;
2326         }
2327
2328         PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2329         return -EIO;
2330 }
2331
2332 int
2333 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2334                           struct rte_eth_rss_reta_entry64 *reta_conf,
2335                           uint16_t reta_size)
2336 {
2337         uint8_t i, j, mask;
2338         uint32_t reta;
2339         uint16_t idx, shift;
2340         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2341         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2342
2343         PMD_INIT_FUNC_TRACE();
2344
2345         if (!hw->is_pf) {
2346                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2347                         "NIC.");
2348                 return -ENOTSUP;
2349         }
2350
2351         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2352                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2353                         "(%d) doesn't match the number hardware can support "
2354                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2355                 return -EINVAL;
2356         }
2357
2358         for (i = 0; i < reta_size; i += 4) {
2359                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2360                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2361                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2362                 if (!mask)
2363                         continue;
2364
2365                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2366                 for (j = 0; j < 4; j++) {
2367                         if (RS8(mask, j, 0x1)) {
2368                                 reta  &= ~(MS32(8 * j, 0xFF));
2369                                 reta |= LS32(reta_conf[idx].reta[shift + j],
2370                                                 8 * j, 0xFF);
2371                         }
2372                 }
2373                 wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2374         }
2375         adapter->rss_reta_updated = 1;
2376
2377         return 0;
2378 }
2379
2380 int
2381 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2382                          struct rte_eth_rss_reta_entry64 *reta_conf,
2383                          uint16_t reta_size)
2384 {
2385         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2386         uint8_t i, j, mask;
2387         uint32_t reta;
2388         uint16_t idx, shift;
2389
2390         PMD_INIT_FUNC_TRACE();
2391
2392         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2393                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2394                         "(%d) doesn't match the number hardware can support "
2395                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2396                 return -EINVAL;
2397         }
2398
2399         for (i = 0; i < reta_size; i += 4) {
2400                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2401                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2402                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2403                 if (!mask)
2404                         continue;
2405
2406                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2407                 for (j = 0; j < 4; j++) {
2408                         if (RS8(mask, j, 0x1))
2409                                 reta_conf[idx].reta[shift + j] =
2410                                         (uint16_t)RS32(reta, 8 * j, 0xFF);
2411                 }
2412         }
2413
2414         return 0;
2415 }
2416
2417 static int
2418 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2419                                 uint32_t index, uint32_t pool)
2420 {
2421         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2422         uint32_t enable_addr = 1;
2423
2424         return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2425                              pool, enable_addr);
2426 }
2427
2428 static void
2429 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2430 {
2431         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2432
2433         ngbe_clear_rar(hw, index);
2434 }
2435
2436 static int
2437 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2438 {
2439         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2440
2441         ngbe_remove_rar(dev, 0);
2442         ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2443
2444         return 0;
2445 }
2446
2447 static int
2448 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2449 {
2450         struct ngbe_hw *hw = ngbe_dev_hw(dev);
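             /* MTU plus L2 header, CRC, and 4 extra bytes (presumably room
              * for a single VLAN tag)
              */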
2451         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2452         struct rte_eth_dev_data *dev_data = dev->data;
2453
2454         /* If the device is started, refuse an MTU that requires scattered
2455          * Rx support when that feature has not been enabled beforehand.
2456          */
2457         if (dev_data->dev_started && !dev_data->scattered_rx &&
2458             (frame_size + 2 * RTE_VLAN_HLEN >
2459              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2460                 PMD_INIT_LOG(ERR, "Stop port first.");
2461                 return -EINVAL;
2462         }
2463
2464         if (hw->mode)
2465                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2466                         NGBE_FRAME_SIZE_MAX);
2467         else
2468                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2469                         NGBE_FRMSZ_MAX(frame_size));
2470
2471         return 0;
2472 }
2473
2474 static uint32_t
2475 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2476 {
2477         uint32_t vector = 0;
2478
2479         switch (hw->mac.mc_filter_type) {
2480         case 0:   /* use bits [47:36] of the address */
2481                 vector = ((uc_addr->addr_bytes[4] >> 4) |
2482                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2483                 break;
2484         case 1:   /* use bits [46:35] of the address */
2485                 vector = ((uc_addr->addr_bytes[4] >> 3) |
2486                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2487                 break;
2488         case 2:   /* use bits [45:34] of the address */
2489                 vector = ((uc_addr->addr_bytes[4] >> 2) |
2490                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2491                 break;
2492         case 3:   /* use bits [43:32] of the address */
2493                 vector = ((uc_addr->addr_bytes[4]) |
2494                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2495                 break;
2496         default:  /* Invalid mc_filter_type */
2497                 break;
2498         }
2499
2500         /* vector can only be 12 bits wide or the boundary will be exceeded */
2501         vector &= 0xFFF;
2502         return vector;
2503 }
2504
2505 static int
2506 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2507                         struct rte_ether_addr *mac_addr, uint8_t on)
2508 {
2509         uint32_t vector;
2510         uint32_t uta_idx;
2511         uint32_t reg_val;
2512         uint32_t uta_mask;
2513         uint32_t psrctl;
2514
2515         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2516         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2517
2518         vector = ngbe_uta_vector(hw, mac_addr);
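             /* Split the 12-bit hash vector: bits [11:5] select one of 128
              * 32-bit UTA registers, bits [4:0] select the bit within it.
              */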
2519         uta_idx = (vector >> 5) & 0x7F;
2520         uta_mask = 0x1UL << (vector & 0x1F);
2521
2522         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2523                 return 0;
2524
2525         reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2526         if (on) {
2527                 uta_info->uta_in_use++;
2528                 reg_val |= uta_mask;
2529                 uta_info->uta_shadow[uta_idx] |= uta_mask;
2530         } else {
2531                 uta_info->uta_in_use--;
2532                 reg_val &= ~uta_mask;
2533                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2534         }
2535
2536         wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2537
2538         psrctl = rd32(hw, NGBE_PSRCTL);
2539         if (uta_info->uta_in_use > 0)
2540                 psrctl |= NGBE_PSRCTL_UCHFENA;
2541         else
2542                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2543
2544         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2545         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2546         wr32(hw, NGBE_PSRCTL, psrctl);
2547
2548         return 0;
2549 }
2550
2551 static int
2552 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2553 {
2554         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2555         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2556         uint32_t psrctl;
2557         int i;
2558
2559         if (on) {
2560                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2561                         uta_info->uta_shadow[i] = ~0;
2562                         wr32(hw, NGBE_UCADDRTBL(i), ~0);
2563                 }
2564         } else {
2565                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2566                         uta_info->uta_shadow[i] = 0;
2567                         wr32(hw, NGBE_UCADDRTBL(i), 0);
2568                 }
2569         }
2570
2571         psrctl = rd32(hw, NGBE_PSRCTL);
2572         if (on)
2573                 psrctl |= NGBE_PSRCTL_UCHFENA;
2574         else
2575                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2576
2577         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2578         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2579         wr32(hw, NGBE_PSRCTL, psrctl);
2580
2581         return 0;
2582 }
2583
2584 /**
2585  * Set the IVAR registers, mapping interrupt causes to vectors
2586  * @param hw
2587  *  pointer to ngbe_hw struct
2588  * @param direction
2589  *  0 for Rx, 1 for Tx, -1 for other causes
2590  * @param queue
2591  *  queue to map the corresponding interrupt to
2592  * @param msix_vector
2593  *  the vector to map to the corresponding queue
2594  */
2595 void
2596 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2597                    uint8_t queue, uint8_t msix_vector)
2598 {
2599         uint32_t tmp, idx;
2600
2601         if (direction == -1) {
2602                 /* other causes */
2603                 msix_vector |= NGBE_IVARMISC_VLD;
2604                 idx = 0;
2605                 tmp = rd32(hw, NGBE_IVARMISC);
2606                 tmp &= ~(0xFF << idx);
2607                 tmp |= (msix_vector << idx);
2608                 wr32(hw, NGBE_IVARMISC, tmp);
2609         } else {
2610                 /* rx or tx causes */
2611                 /* Workaround for ICR lost */
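                     /* Each 32-bit IVAR register maps two queues: the byte
                      * offset is 16 * (queue & 1) + 8 * direction, with
                      * direction 0 for Rx and 1 for Tx.
                      */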
2612                 idx = ((16 * (queue & 1)) + (8 * direction));
2613                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2614                 tmp &= ~(0xFF << idx);
2615                 tmp |= (msix_vector << idx);
2616                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2617         }
2618 }
2619
2620 /**
2621  * Sets up the hardware to properly generate MSI-X interrupts
2622  * @param dev
2623  *  pointer to the rte_eth_dev structure
2624  */
2625 static void
2626 ngbe_configure_msix(struct rte_eth_dev *dev)
2627 {
2628         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2629         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2630         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2631         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2632         uint32_t vec = NGBE_MISC_VEC_ID;
2633         uint32_t gpie;
2634
2635         /*
2636          * Don't configure the MSI-X register if no mapping has been done
2637          * between interrupt vectors and event fds; but if MSI-X has
2638          * already been enabled, auto clean, auto mask and throttling
2639          * still need to be configured.
2640          */
2641         gpie = rd32(hw, NGBE_GPIE);
2642         if (!rte_intr_dp_is_en(intr_handle) &&
2643             !(gpie & NGBE_GPIE_MSIX))
2644                 return;
2645
2646         if (rte_intr_allow_others(intr_handle)) {
2647                 base = NGBE_RX_VEC_START;
2648                 vec = base;
2649         }
2650
2651         /* setup GPIE for MSI-X mode */
2652         gpie = rd32(hw, NGBE_GPIE);
2653         gpie |= NGBE_GPIE_MSIX;
2654         wr32(hw, NGBE_GPIE, gpie);
2655
2656         /* Populate the IVAR table and set the ITR values to the
2657          * corresponding register.
2658          */
2659         if (rte_intr_dp_is_en(intr_handle)) {
2660                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2661                         queue_id++) {
2662                         /* by default, 1:1 mapping */
2663                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
2664                         rte_intr_vec_list_index_set(intr_handle,
2665                                                            queue_id, vec);
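                             /* Advance to the next vector until they run
                              * out; any remaining queues then share the
                              * last vector.
                              */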
2666                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
2667                             - 1)
2668                                 vec++;
2669                 }
2670
2671                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2672         }
2673         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2674                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2675                         | NGBE_ITR_WRDSA);
2676 }
2677
2678 static u8 *
2679 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2680                         u8 **mc_addr_ptr, u32 *vmdq)
2681 {
2682         u8 *mc_addr;
2683
2684         *vmdq = 0;
2685         mc_addr = *mc_addr_ptr;
2686         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2687         return mc_addr;
2688 }
2689
2690 int
2691 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2692                           struct rte_ether_addr *mc_addr_set,
2693                           uint32_t nb_mc_addr)
2694 {
2695         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2696         u8 *mc_addr_list;
2697
2698         mc_addr_list = (u8 *)mc_addr_set;
2699         return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2700                                          ngbe_dev_addr_list_itr, TRUE);
2701 }
2702
2703 static uint64_t
2704 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2705 {
2706         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2707         uint64_t systime_cycles;
2708
2709         systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2710         systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2711
2712         return systime_cycles;
2713 }
2714
2715 static uint64_t
2716 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2717 {
2718         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2719         uint64_t rx_tstamp_cycles;
2720
2721         /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2722         rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2723         rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2724
2725         return rx_tstamp_cycles;
2726 }
2727
2728 static uint64_t
2729 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2730 {
2731         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2732         uint64_t tx_tstamp_cycles;
2733
2734         /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2735         tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2736         tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2737
2738         return tx_tstamp_cycles;
2739 }
2740
2741 static void
2742 ngbe_start_timecounters(struct rte_eth_dev *dev)
2743 {
2744         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2745         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2746         uint32_t incval = 0;
2747         uint32_t shift = 0;
2748
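             /* The SYSTIME increment value and timecounter shift are fixed
              * at the 1Gb settings; they are not rescaled per link speed
              * here.
              */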
2749         incval = NGBE_INCVAL_1GB;
2750         shift = NGBE_INCVAL_SHIFT_1GB;
2751
2752         wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2753
2754         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2755         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2756         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2757
2758         adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2759         adapter->systime_tc.cc_shift = shift;
2760         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2761
2762         adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2763         adapter->rx_tstamp_tc.cc_shift = shift;
2764         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2765
2766         adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2767         adapter->tx_tstamp_tc.cc_shift = shift;
2768         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2769 }
2770
2771 static int
2772 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2773 {
2774         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2775
2776         adapter->systime_tc.nsec += delta;
2777         adapter->rx_tstamp_tc.nsec += delta;
2778         adapter->tx_tstamp_tc.nsec += delta;
2779
2780         return 0;
2781 }
2782
2783 static int
2784 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2785 {
2786         uint64_t ns;
2787         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2788
2789         ns = rte_timespec_to_ns(ts);
2790         /* Set the timecounters to a new value. */
2791         adapter->systime_tc.nsec = ns;
2792         adapter->rx_tstamp_tc.nsec = ns;
2793         adapter->tx_tstamp_tc.nsec = ns;
2794
2795         return 0;
2796 }
2797
2798 static int
2799 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2800 {
2801         uint64_t ns, systime_cycles;
2802         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2803
2804         systime_cycles = ngbe_read_systime_cyclecounter(dev);
2805         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2806         *ts = rte_ns_to_timespec(ns);
2807
2808         return 0;
2809 }
2810
2811 static int
2812 ngbe_timesync_enable(struct rte_eth_dev *dev)
2813 {
2814         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2815         uint32_t tsync_ctl;
2816
2817         /* Stop the timesync system time. */
2818         wr32(hw, NGBE_TSTIMEINC, 0x0);
2819         /* Reset the timesync system time value. */
2820         wr32(hw, NGBE_TSTIMEL, 0x0);
2821         wr32(hw, NGBE_TSTIMEH, 0x0);
2822
2823         ngbe_start_timecounters(dev);
2824
2825         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2826         wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2827                 RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2828
2829         /* Enable timestamping of received PTP packets. */
2830         tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2831         tsync_ctl |= NGBE_TSRXCTL_ENA;
2832         wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2833
2834         /* Enable timestamping of transmitted PTP packets. */
2835         tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2836         tsync_ctl |= NGBE_TSTXCTL_ENA;
2837         wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2838
2839         ngbe_flush(hw);
2840
2841         return 0;
2842 }
2843
2844 static int
2845 ngbe_timesync_disable(struct rte_eth_dev *dev)
2846 {
2847         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2848         uint32_t tsync_ctl;
2849
2850         /* Disable timestamping of transmitted PTP packets. */
2851         tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2852         tsync_ctl &= ~NGBE_TSTXCTL_ENA;
2853         wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2854
2855         /* Disable timestamping of received PTP packets. */
2856         tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2857         tsync_ctl &= ~NGBE_TSRXCTL_ENA;
2858         wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2859
2860         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2861         wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
2862
2863         /* Stop incrementing the System Time registers. */
2864         wr32(hw, NGBE_TSTIMEINC, 0);
2865
2866         return 0;
2867 }
2868
2869 static int
2870 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2871                                  struct timespec *timestamp,
2872                                  uint32_t flags __rte_unused)
2873 {
2874         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2875         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2876         uint32_t tsync_rxctl;
2877         uint64_t rx_tstamp_cycles;
2878         uint64_t ns;
2879
2880         tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
2881         if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
2882                 return -EINVAL;
2883
2884         rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
2885         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
2886         *timestamp = rte_ns_to_timespec(ns);
2887
2888         return 0;
2889 }
2890
2891 static int
2892 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2893                                  struct timespec *timestamp)
2894 {
2895         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2896         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2897         uint32_t tsync_txctl;
2898         uint64_t tx_tstamp_cycles;
2899         uint64_t ns;
2900
2901         tsync_txctl = rd32(hw, NGBE_TSTXCTL);
2902         if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
2903                 return -EINVAL;
2904
2905         tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
2906         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
2907         *timestamp = rte_ns_to_timespec(ns);
2908
2909         return 0;
2910 }
2911
2912 static int
2913 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2914 {
2915         int count = 0;
2916         int g_ind = 0;
2917         const struct reg_info *reg_group;
2918         const struct reg_info **reg_set = ngbe_regs_others;
2919
2920         while ((reg_group = reg_set[g_ind++]))
2921                 count += ngbe_regs_group_count(reg_group);
2922
2923         return count;
2924 }
2925
2926 static int
2927 ngbe_get_regs(struct rte_eth_dev *dev,
2928               struct rte_dev_reg_info *regs)
2929 {
2930         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2931         uint32_t *data = regs->data;
2932         int g_ind = 0;
2933         int count = 0;
2934         const struct reg_info *reg_group;
2935         const struct reg_info **reg_set = ngbe_regs_others;
2936
2937         if (data == NULL) {
2938                 regs->length = ngbe_get_reg_length(dev);
2939                 regs->width = sizeof(uint32_t);
2940                 return 0;
2941         }
2942
2943         /* Support only full register dump */
2944         if (regs->length == 0 ||
2945             regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
2946                 regs->version = hw->mac.type << 24 |
2947                                 hw->revision_id << 16 |
2948                                 hw->device_id;
2949                 while ((reg_group = reg_set[g_ind++]))
2950                         count += ngbe_read_regs_group(dev, &data[count],
2951                                                       reg_group);
2952                 return 0;
2953         }
2954
2955         return -ENOTSUP;
2956 }
2957
2958 static int
2959 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
2960 {
2961         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2962
2963         /* Return unit is byte count */
2964         return hw->rom.word_size * 2;
2965 }
2966
2967 static int
2968 ngbe_get_eeprom(struct rte_eth_dev *dev,
2969                 struct rte_dev_eeprom_info *in_eeprom)
2970 {
2971         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2972         struct ngbe_rom_info *eeprom = &hw->rom;
2973         uint16_t *data = in_eeprom->data;
2974         int first, length;
2975
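             /* The ROM API works in 16-bit words, so convert the byte
              * offset and length from the request.
              */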
2976         first = in_eeprom->offset >> 1;
2977         length = in_eeprom->length >> 1;
2978         if (first > hw->rom.word_size ||
2979             ((first + length) > hw->rom.word_size))
2980                 return -EINVAL;
2981
2982         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
2983
2984         return eeprom->readw_buffer(hw, first, length, data);
2985 }
2986
2987 static int
2988 ngbe_set_eeprom(struct rte_eth_dev *dev,
2989                 struct rte_dev_eeprom_info *in_eeprom)
2990 {
2991         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2992         struct ngbe_rom_info *eeprom = &hw->rom;
2993         uint16_t *data = in_eeprom->data;
2994         int first, length;
2995
2996         first = in_eeprom->offset >> 1;
2997         length = in_eeprom->length >> 1;
2998         if (first > hw->rom.word_size ||
2999             ((first + length) > hw->rom.word_size))
3000                 return -EINVAL;
3001
3002         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3003
3004         return eeprom->writew_buffer(hw, first, length, data);
3005 }
3006
3007 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3008         .dev_configure              = ngbe_dev_configure,
3009         .dev_infos_get              = ngbe_dev_info_get,
3010         .dev_start                  = ngbe_dev_start,
3011         .dev_stop                   = ngbe_dev_stop,
3012         .dev_close                  = ngbe_dev_close,
3013         .dev_reset                  = ngbe_dev_reset,
3014         .promiscuous_enable         = ngbe_dev_promiscuous_enable,
3015         .promiscuous_disable        = ngbe_dev_promiscuous_disable,
3016         .allmulticast_enable        = ngbe_dev_allmulticast_enable,
3017         .allmulticast_disable       = ngbe_dev_allmulticast_disable,
3018         .link_update                = ngbe_dev_link_update,
3019         .stats_get                  = ngbe_dev_stats_get,
3020         .xstats_get                 = ngbe_dev_xstats_get,
3021         .xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3022         .stats_reset                = ngbe_dev_stats_reset,
3023         .xstats_reset               = ngbe_dev_xstats_reset,
3024         .xstats_get_names           = ngbe_dev_xstats_get_names,
3025         .xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3026         .fw_version_get             = ngbe_fw_version_get,
3027         .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3028         .mtu_set                    = ngbe_dev_mtu_set,
3029         .vlan_filter_set            = ngbe_vlan_filter_set,
3030         .vlan_tpid_set              = ngbe_vlan_tpid_set,
3031         .vlan_offload_set           = ngbe_vlan_offload_set,
3032         .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3033         .rx_queue_start             = ngbe_dev_rx_queue_start,
3034         .rx_queue_stop              = ngbe_dev_rx_queue_stop,
3035         .tx_queue_start             = ngbe_dev_tx_queue_start,
3036         .tx_queue_stop              = ngbe_dev_tx_queue_stop,
3037         .rx_queue_setup             = ngbe_dev_rx_queue_setup,
3038         .rx_queue_release           = ngbe_dev_rx_queue_release,
3039         .tx_queue_setup             = ngbe_dev_tx_queue_setup,
3040         .tx_queue_release           = ngbe_dev_tx_queue_release,
3041         .dev_led_on                 = ngbe_dev_led_on,
3042         .dev_led_off                = ngbe_dev_led_off,
3043         .flow_ctrl_get              = ngbe_flow_ctrl_get,
3044         .flow_ctrl_set              = ngbe_flow_ctrl_set,
3045         .mac_addr_add               = ngbe_add_rar,
3046         .mac_addr_remove            = ngbe_remove_rar,
3047         .mac_addr_set               = ngbe_set_default_mac_addr,
3048         .uc_hash_table_set          = ngbe_uc_hash_table_set,
3049         .uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3050         .reta_update                = ngbe_dev_rss_reta_update,
3051         .reta_query                 = ngbe_dev_rss_reta_query,
3052         .rss_hash_update            = ngbe_dev_rss_hash_update,
3053         .rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3054         .set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3055         .rxq_info_get               = ngbe_rxq_info_get,
3056         .txq_info_get               = ngbe_txq_info_get,
3057         .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3058         .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3059         .timesync_enable            = ngbe_timesync_enable,
3060         .timesync_disable           = ngbe_timesync_disable,
3061         .timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3062         .timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3063         .get_reg                    = ngbe_get_regs,
3064         .get_eeprom_length          = ngbe_get_eeprom_length,
3065         .get_eeprom                 = ngbe_get_eeprom,
3066         .set_eeprom                 = ngbe_set_eeprom,
3067         .timesync_adjust_time       = ngbe_timesync_adjust_time,
3068         .timesync_read_time         = ngbe_timesync_read_time,
3069         .timesync_write_time        = ngbe_timesync_write_time,
3070         .tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3071 };
3072
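/*
 * Register the PMD with the PCI bus, export its device ID table for probe
 * tooling, and list the kernel modules the device may be bound through.
 */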
3073 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3074 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3075 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3076
3077 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3078 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3079
3080 #ifdef RTE_ETHDEV_DEBUG_RX
3081         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3082 #endif
3083 #ifdef RTE_ETHDEV_DEBUG_TX
3084         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3085 #endif