net/ngbe: fix Rx by initializing packet buffer early
drivers/net/ngbe/ngbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
        {NGBE_RST, 1, 1, "NGBE_RST"},
        {NGBE_STAT, 1, 1, "NGBE_STAT"},
        {NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
        {NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
        {NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
        {NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
        {0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
                                ngbe_regs_general,
                                ngbe_regs_nvm,
                                ngbe_regs_interrupt,
                                ngbe_regs_fctl_others,
                                ngbe_regs_rxdma,
                                ngbe_regs_rx,
                                ngbe_regs_tx,
                                ngbe_regs_wakeup,
                                ngbe_regs_mac,
                                ngbe_regs_diagnostic,
                                NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
                                        uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

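/*
 * The per-queue HW VLAN-strip state is tracked in a bitmap: queue q maps
 * to word q / (bits per word) and bit q % (bits per word), where a word
 * holds sizeof((h)->bitmap[0]) * NBBY bits (NBBY is bits per byte, i.e. 8).
 * For example, with 32-bit words, queue 37 lands in bitmap[1], bit 5.
 */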
#define NGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_TXD_ALIGN,
        .nb_seg_max = NGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
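/* e.g. HW_XSTAT(rx_packets) expands to
 * {"rx_packets", offsetof(struct ngbe_hw_stats, rx_packets)}
 */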
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
        /* MNG RxTx */
        HW_XSTAT(mng_bmc2host_packets),
        HW_XSTAT(mng_host2bmc_packets),
        /* Basic RxTx */
        HW_XSTAT(rx_packets),
        HW_XSTAT(tx_packets),
        HW_XSTAT(rx_bytes),
        HW_XSTAT(tx_bytes),
        HW_XSTAT(rx_total_bytes),
        HW_XSTAT(rx_total_packets),
        HW_XSTAT(tx_total_packets),
        HW_XSTAT(rx_total_missed_packets),
        HW_XSTAT(rx_broadcast_packets),
        HW_XSTAT(rx_multicast_packets),
        HW_XSTAT(rx_management_packets),
        HW_XSTAT(tx_management_packets),
        HW_XSTAT(rx_management_dropped),

        /* Basic Error */
        HW_XSTAT(rx_crc_errors),
        HW_XSTAT(rx_illegal_byte_errors),
        HW_XSTAT(rx_error_bytes),
        HW_XSTAT(rx_mac_short_packet_dropped),
        HW_XSTAT(rx_length_errors),
        HW_XSTAT(rx_undersize_errors),
        HW_XSTAT(rx_fragment_errors),
        HW_XSTAT(rx_oversize_errors),
        HW_XSTAT(rx_jabber_errors),
        HW_XSTAT(rx_l3_l4_xsum_error),
        HW_XSTAT(mac_local_errors),
        HW_XSTAT(mac_remote_errors),

        /* MACSEC */
        HW_XSTAT(tx_macsec_pkts_untagged),
        HW_XSTAT(tx_macsec_pkts_encrypted),
        HW_XSTAT(tx_macsec_pkts_protected),
        HW_XSTAT(tx_macsec_octets_encrypted),
        HW_XSTAT(tx_macsec_octets_protected),
        HW_XSTAT(rx_macsec_pkts_untagged),
        HW_XSTAT(rx_macsec_pkts_badtag),
        HW_XSTAT(rx_macsec_pkts_nosci),
        HW_XSTAT(rx_macsec_pkts_unknownsci),
        HW_XSTAT(rx_macsec_octets_decrypted),
        HW_XSTAT(rx_macsec_octets_validated),
        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
        HW_XSTAT(rx_macsec_sc_pkts_delayed),
        HW_XSTAT(rx_macsec_sc_pkts_late),
        HW_XSTAT(rx_macsec_sa_pkts_ok),
        HW_XSTAT(rx_macsec_sa_pkts_invalid),
        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

        /* MAC RxTx */
        HW_XSTAT(rx_size_64_packets),
        HW_XSTAT(rx_size_65_to_127_packets),
        HW_XSTAT(rx_size_128_to_255_packets),
        HW_XSTAT(rx_size_256_to_511_packets),
        HW_XSTAT(rx_size_512_to_1023_packets),
        HW_XSTAT(rx_size_1024_to_max_packets),
        HW_XSTAT(tx_size_64_packets),
        HW_XSTAT(tx_size_65_to_127_packets),
        HW_XSTAT(tx_size_128_to_255_packets),
        HW_XSTAT(tx_size_256_to_511_packets),
        HW_XSTAT(tx_size_512_to_1023_packets),
        HW_XSTAT(tx_size_1024_to_max_packets),

        /* Flow Control */
        HW_XSTAT(tx_xon_packets),
        HW_XSTAT(rx_xon_packets),
        HW_XSTAT(tx_xoff_packets),
        HW_XSTAT(rx_xoff_packets),

        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
                           sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
        QP_XSTAT(rx_qp_packets),
        QP_XSTAT(tx_qp_packets),
        QP_XSTAT(rx_qp_bytes),
        QP_XSTAT(tx_qp_bytes),
        QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
                           sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = hw->mac.reset_hw(hw);

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        if (status == NGBE_ERR_SFP_NOT_PRESENT)
                status = 0;
        return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);

        wr32(hw, NGBE_IENMISC, intr->mask_misc);
        wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
        ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
        ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
        uint16_t mask;

        /*
         * These locks are trickier since they are common to all ports; but
         * the swfw_sync retries last long enough (1s) that, if the lock
         * cannot be taken, it is almost certainly due to an improper lock
         * of the semaphore.
         */
        mask = NGBE_MNGSEM_SWPHY |
               NGBE_MNGSEM_SWMBX |
               NGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        int err, ret;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ngbe_eth_dev_ops;
        eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
        eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
        eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
        eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further because
         * the primary process has already done this work. We only check
         * whether a different Rx or Tx function is needed.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ngbe_tx_queue *txq;
                /* The Tx queue function in the primary process is set by the
                 * last queue initialized; Tx queues may not have been
                 * initialized by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        ngbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default Tx function if we get here */
                        PMD_INIT_LOG(NOTICE,
                                "No Tx queues configured yet. Using default Tx function.");
                }

                ngbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->sub_system_id = pci_dev->id.subsystem_device_id;
        ngbe_map_device_id(hw);
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
                NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = ngbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = ngbe_fc_full;
        hw->fc.current_mode = ngbe_fc_full;
        hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
        hw->fc.low_water = NGBE_FC_XON_LOTH;
        hw->fc.high_water = NGBE_FC_XOFF_HITH;
        hw->fc.send_xon = 1;

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, NULL);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* Reset the hw statistics */
        ngbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ngbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
                        RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        /* initialize PF if max_vfs not zero */
        ret = ngbe_pf_host_init(eth_dev);
        if (ret) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                rte_free(eth_dev->data->hash_mac_addrs);
                eth_dev->data->hash_mac_addrs = NULL;
                return ret;
        }

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                        (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   ngbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable supported interrupts */
        ngbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ngbe_dev_close(eth_dev);

        return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct ngbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (ethdev == NULL)
                return 0;

        return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
        .id_table = pci_id_ngbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_ngbe_pci_probe,
        .remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

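        /*
         * The 4096-entry VLAN filter table is spread across 128 32-bit
         * VLANTBL registers: the upper 7 bits of the 12-bit VLAN ID select
         * the register, the lower 5 bits select the bit within it
         * (e.g. VLAN 100 -> register 3, bit 4).
         */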
        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
        vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_rx_queue *rxq;
        bool restart;
        uint32_t rxcfg, rxbal, rxbah;

        if (on)
                ngbe_vlan_hw_strip_enable(dev, queue);
        else
                ngbe_vlan_hw_strip_disable(dev, queue);

        rxq = dev->data->rx_queues[queue];
        rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        !(rxcfg & NGBE_RXCFG_VLAN);
                rxcfg |= NGBE_RXCFG_VLAN;
        } else {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        (rxcfg & NGBE_RXCFG_VLAN);
                rxcfg &= ~NGBE_RXCFG_VLAN;
        }
        rxcfg &= ~NGBE_RXCFG_ENA;

        if (restart) {
                /* set vlan strip for ring */
                ngbe_dev_rx_queue_stop(dev, queue);
                wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
                wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
                ngbe_dev_rx_queue_start(dev, queue);
        }
}

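/*
 * Usage sketch (hypothetical values): to program the QinQ outer TPID,
 * e.g. 0x88a8, a caller would invoke
 *     ngbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER, 0x88a8);
 * which takes the RTE_ETH_VLAN_TYPE_OUTER branch below; the inner type
 * is only accepted when VLAN extend (QinQ) mode is already enabled.
 */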
static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type,
                    uint16_t tpid)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        int ret = 0;
        uint32_t portctrl, vlan_ext, qinq;

        portctrl = rd32(hw, NGBE_PORTCTL);

        vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
        switch (vlan_type) {
        case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                } else {
                        ret = -ENOTSUP;
                        PMD_DRV_LOG(ERR,
                                "Inner type is not supported by single VLAN");
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_LSB_MASK,
                                NGBE_TAGTPID_LSB(tpid));
                }
                break;
        case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16 bits are valid */
                        wr32m(hw, NGBE_EXTAG,
                                NGBE_EXTAG_VLAN_MASK,
                                NGBE_EXTAG_VLAN(tpid));
                } else {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_MSB_MASK,
                                NGBE_TAGTPID_MSB(tpid));
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                return -EINVAL;
        }

        return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vlnctrl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_CFIENA;
        vlnctrl |= NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < NGBE_VFTA_SIZE; i++)
                wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
        struct ngbe_rx_queue *rxq;

        if (queue >= NGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
                NGBE_SET_HWSTRIP(hwstrip, queue);
        else
                NGBE_CLEAR_HWSTRIP(hwstrip, queue);

        if (queue >= dev->data->nb_rx_queues)
                return;

        rxq = dev->data->rx_queues[queue];

        if (on) {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl &= ~NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting in the per-queue HW strip bitmap */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl |= NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record this setting in the per-queue HW strip bitmap */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_VLANEXT;
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
        struct ngbe_rx_queue *rxq;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ngbe_vlan_hw_strip_enable(dev, i);
                else
                        ngbe_vlan_hw_strip_disable(dev, i);
        }
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
        uint16_t i;
        struct rte_eth_rxmode *rxmode;
        struct ngbe_rx_queue *rxq;

        if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
}

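/*
 * The mask argument selects which VLAN offload groups to (re)apply from
 * dev->data->dev_conf.rxmode; e.g. ngbe_dev_start() below passes
 * RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
 * RTE_ETH_VLAN_EXTEND_MASK to reprogram all three after a hardware reset.
 */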
static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;

        if (mask & RTE_ETH_VLAN_STRIP_MASK)
                ngbe_vlan_hw_strip_config(dev);

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ngbe_vlan_hw_filter_enable(dev);
                else
                        ngbe_vlan_hw_filter_disable(dev);
        }

        if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        ngbe_vlan_hw_extend_enable(dev);
                else
                        ngbe_vlan_hw_extend_disable(dev);
        }

        if (mask & RTE_ETH_QINQ_STRIP_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
                        ngbe_qinq_hw_strip_enable(dev);
                else
                        ngbe_qinq_hw_strip_disable(dev);
        }

        return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        ngbe_config_vlan_strip_on_all_queues(dev, mask);

        ngbe_vlan_offload_config(dev, mask);

        return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

        /* set flag to update link status after init */
        intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue fails to meet the bulk
         * allocation preconditions, it will be reset.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

        wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
        wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
        wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
        if (hw->phy.type == ngbe_phy_yt8521s_sfi)
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
        else
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

        intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
        uint32_t *link_speeds;

        PMD_INIT_FUNC_TRACE();

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        /* stop adapter */
        hw->adapter_stopped = 0;
        ngbe_stop_hw(hw);

        /* reinitialize adapter; this calls reset and start */
        hw->nb_rx_queues = dev->data->nb_rx_queues;
        hw->nb_tx_queues = dev->data->nb_tx_queues;
        status = ngbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;

        /* configure PF module if SRIOV enabled */
        ngbe_pf_host_configure(dev);

        ngbe_dev_phy_intr_setup(dev);

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle)) {
                if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                                   dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate %d rx_queues intr_vec",
                                     dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure MSI-X for sleep until Rx interrupt */
        ngbe_configure_msix(dev);

        /* initialize transmission unit */
        ngbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ngbe_dev_rx_init(dev);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
                goto error;
        }

        mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
                RTE_ETH_VLAN_EXTEND_MASK;
        err = ngbe_vlan_offload_config(dev, mask);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }

        hw->mac.setup_pba(hw);
        ngbe_configure_port(dev);

        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }

        /* Skip link setup if loopback mode is enabled. */
        if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
                goto skip_link_setup;

        err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err != 0)
                goto error;
        dev->data->dev_link.link_status = link_up;

        link_speeds = &dev->data->dev_conf.link_speeds;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
                negotiate = true;

        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
        if (err != 0)
                goto error;

        allowed_speeds = 0;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

        if (*link_speeds & ~allowed_speeds) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }

        speed = 0x0;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = hw->mac.default_speeds;
        } else {
                if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= NGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= NGBE_LINK_SPEED_100M_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
                        speed |= NGBE_LINK_SPEED_10M_FULL;
        }

        hw->phy.init_hw(hw);
        err = hw->mac.setup_link(hw, speed, link_up);
        if (err != 0)
                goto error;

skip_link_setup:

        if (rte_intr_allow_others(intr_handle)) {
                ngbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        ngbe_dev_lsc_interrupt_setup(dev, TRUE);
                else
                        ngbe_dev_lsc_interrupt_setup(dev, FALSE);
                ngbe_dev_macsec_interrupt_setup(dev);
                ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             ngbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO,
                                     "LSC won't enable because of no intr multiplex");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
                ngbe_dev_rxq_interrupt_setup(dev);

        /* enable UIO/VFIO intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* re-enable the interrupts that were enabled before the HW reset */
        ngbe_enable_intr(dev);

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* GPIO 0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, 0);
        }

        /*
         * Update the link status right before returning, because it may
         * start the link configuration process in a separate thread.
         */
        ngbe_dev_link_update(dev, 0);

        ngbe_read_stats_registers(hw, hw_stats);
        hw->offset_loaded = 1;

        return 0;

error:
        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
        ngbe_dev_clear_queues(dev);
        return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int vf;

        if (hw->adapter_stopped)
                return 0;

        PMD_INIT_FUNC_TRACE();

        if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
                (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
                /* GPIO 0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
        }

        /* disable interrupts */
        ngbe_disable_intr(hw);

        /* reset the NIC */
        ngbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;

        /* stop adapter */
        ngbe_stop_hw(hw);

        for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
                vfinfo[vf].clear_to_send = false;

        ngbe_dev_clear_queues(dev);

        /* Clear stored conf */
        dev->data->scattered_rx = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* restore the default interrupt handler */
                rte_intr_callback_register(intr_handle,
                                           ngbe_dev_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_intr_vec_list_free(intr_handle);

        adapter->rss_reta_updated = 0;

        hw->adapter_stopped = true;
        dev->data->dev_started = 0;

        return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        ngbe_pf_reset_hw(hw);

        ngbe_dev_stop(dev);

        ngbe_dev_free_queues(dev);

        /* reprogram the RAR[0] in case user changed it. */
        ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                ngbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + NGBE_LINK_UP_TIME));

        /* uninitialize PF if max_vfs not zero */
        ngbe_pf_host_uninit(dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        /* When a DPDK PMD PF begins to reset a PF port, it should notify all
         * of its VFs so they can align with it. The detailed notification
         * mechanism is PMD-specific; for the ngbe PF it is rather complex.
         * To avoid unexpected behavior in VFs, resetting a PF with SR-IOV
         * activated is currently not supported. It might be supported later.
         */
        if (dev->data->sriov.active)
                return -ENOTSUP;

        ret = eth_ngbe_dev_uninit(dev);
        if (ret != 0)
                return ret;

        ret = eth_ngbe_dev_init(dev, NULL);

        return ret;
}

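/*
 * Helpers for reading the per-queue hardware counters: read the raw
 * register, compensate for wrap-around relative to the last snapshot
 * (adding 2^32 for the 32-bit variant, 2^36 for the 36-bit one), and
 * report the value as an offset from the snapshot taken while
 * hw->offset_loaded was cleared, masked back to the counter width.
 * For example, last = 0xFFFFFFF0 and current = 0x10 yields 0x20 for a
 * 32-bit counter.
 */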
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
        {                                                       \
                uint32_t current_counter = rd32(hw, reg);       \
                if (current_counter < last_counter)             \
                        current_counter += 0x100000000LL;       \
                if (!hw->offset_loaded)                         \
                        last_counter = current_counter;         \
                counter = current_counter - last_counter;       \
                counter &= 0xFFFFFFFFLL;                        \
        }

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        {                                                                \
                uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
                uint64_t current_counter_msb = rd32(hw, reg_msb);        \
                uint64_t current_counter = (current_counter_msb << 32) | \
                        current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        current_counter += 0x1000000000LL;               \
                if (!hw->offset_loaded)                                  \
                        last_counter = current_counter;                  \
                counter = current_counter - last_counter;                \
                counter &= 0xFFFFFFFFFLL;                                \
        }

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
                           struct ngbe_hw_stats *hw_stats)
{
        unsigned int i;

        /* QP Stats */
        for (i = 0; i < hw->nb_rx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
                        hw->qp_last[i].rx_qp_packets,
                        hw_stats->qp[i].rx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
                        hw->qp_last[i].rx_qp_bytes,
                        hw_stats->qp[i].rx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
                        hw->qp_last[i].rx_qp_mc_packets,
                        hw_stats->qp[i].rx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
                        hw->qp_last[i].rx_qp_bc_packets,
                        hw_stats->qp[i].rx_qp_bc_packets);
        }

        for (i = 0; i < hw->nb_tx_queues; i++) {
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
                        hw->qp_last[i].tx_qp_packets,
                        hw_stats->qp[i].tx_qp_packets);
                UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
                        hw->qp_last[i].tx_qp_bytes,
                        hw_stats->qp[i].tx_qp_bytes);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
                        hw->qp_last[i].tx_qp_mc_packets,
                        hw_stats->qp[i].tx_qp_mc_packets);
                UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
                        hw->qp_last[i].tx_qp_bc_packets,
                        hw_stats->qp[i].tx_qp_bc_packets);
        }

        /* PB Stats */
        hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
        hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
        hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
        hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
        hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
        hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

        hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
        hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

        /* DMA Stats */
        hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
        hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
        hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
        hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
        hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
        hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
        hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

        /* MAC Stats */
        hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
        hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
        hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

        hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
        hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
        hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

        hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
        hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

        hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
        hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
        hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
        hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
        hw_stats->rx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACRX512TO1023L);
        hw_stats->rx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACRX1024TOMAXL);
        hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
        hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
        hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
        hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
        hw_stats->tx_size_512_to_1023_packets +=
                        rd64(hw, NGBE_MACTX512TO1023L);
        hw_stats->tx_size_1024_to_max_packets +=
                        rd64(hw, NGBE_MACTX1024TOMAXL);

        hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
        hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
        hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

        /* MNG Stats */
        hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
        hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
        hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
        hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

        /* MACsec Stats */
        hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
        hw_stats->tx_macsec_pkts_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCPKT);
        hw_stats->tx_macsec_pkts_protected +=
                        rd32(hw, NGBE_LSECTX_PROTPKT);
        hw_stats->tx_macsec_octets_encrypted +=
                        rd32(hw, NGBE_LSECTX_ENCOCT);
        hw_stats->tx_macsec_octets_protected +=
                        rd32(hw, NGBE_LSECTX_PROTOCT);
        hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
        hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
        hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
        hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
        hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
        hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
        hw_stats->rx_macsec_sc_pkts_unchecked +=
                        rd32(hw, NGBE_LSECRX_UNCHKPKT);
        hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
        hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
        for (i = 0; i < 2; i++) {
                hw_stats->rx_macsec_sa_pkts_ok +=
                        rd32(hw, NGBE_LSECRX_OKPKT(i));
                hw_stats->rx_macsec_sa_pkts_invalid +=
                        rd32(hw, NGBE_LSECRX_INVPKT(i));
                hw_stats->rx_macsec_sa_pkts_notvalid +=
                        rd32(hw, NGBE_LSECRX_BADPKT(i));
        }
        for (i = 0; i < 4; i++) {
                hw_stats->rx_macsec_sa_pkts_unusedsa +=
                        rd32(hw, NGBE_LSECRX_INVSAPKT(i));
                hw_stats->rx_macsec_sa_pkts_notusingsa +=
                        rd32(hw, NGBE_LSECRX_BADSAPKT(i));
        }
        hw_stats->rx_total_missed_packets =
                        hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct ngbe_stat_mappings *stat_mappings =
                        NGBE_DEV_STAT_MAPPINGS(dev);
        uint32_t i, j;

        ngbe_read_stats_registers(hw, hw_stats);

        if (stats == NULL)
                return -EINVAL;

        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = hw_stats->rx_packets;
        stats->ibytes = hw_stats->rx_bytes;
        stats->opackets = hw_stats->tx_packets;
        stats->obytes = hw_stats->tx_bytes;

        memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
        memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
        memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
        memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
        memset(&stats->q_errors, 0, sizeof(stats->q_errors));
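        /*
         * Each RQSM/TQSM mapping register packs NB_QMAP_FIELDS_PER_QSM_REG
         * 8-bit fields, so queue i's field lives in register i / N at bit
         * offset (i % N) * 8; the field value (masked by
         * QMAP_FIELD_RESERVED_BITS_MASK) selects which per-queue stats
         * counter the queue is accumulated into.
         */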
        for (i = 0; i < NGBE_MAX_QP; i++) {
                uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
                uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
                uint32_t q_map;

                q_map = (stat_mappings->rqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
                stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

                q_map = (stat_mappings->tqsm[n] >> offset)
                                & QMAP_FIELD_RESERVED_BITS_MASK;
                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
                stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
                stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
        }

        /* Rx Errors */
        stats->imissed  = hw_stats->rx_total_missed_packets +
                          hw_stats->rx_dma_drop;
        stats->ierrors  = hw_stats->rx_crc_errors +
                          hw_stats->rx_mac_short_packet_dropped +
                          hw_stats->rx_length_errors +
                          hw_stats->rx_undersize_errors +
                          hw_stats->rx_oversize_errors +
                          hw_stats->rx_illegal_byte_errors +
                          hw_stats->rx_error_bytes +
                          hw_stats->rx_fragment_errors;

        /* Tx Errors */
        stats->oerrors  = 0;
        return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

        /* HW registers are cleared on read */
        hw->offset_loaded = 0;
        ngbe_dev_stats_get(dev, NULL);
        hw->offset_loaded = 1;

        /* Reset software totals */
        memset(hw_stats, 0, sizeof(*hw_stats));

        return 0;
}

/* This function calculates the number of xstats based on the current config */
1501 static unsigned
1502 ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1503 {
1504         int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1505         return NGBE_NB_HW_STATS +
1506                NGBE_NB_QP_STATS * nb_queues;
1507 }
1508
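/* xstats ids form a flat space: ids [0, NGBE_NB_HW_STATS) map to the
 * hw-level counters, followed by NGBE_NB_QP_STATS entries per queue.
 * E.g. (illustrative values only) if NGBE_NB_HW_STATS were 71 and
 * NGBE_NB_QP_STATS were 4, id 75 would resolve to queue 1, stat 0.
 */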
1509 static inline int
1510 ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1511 {
1512         int nb, st;
1513
1514         /* Extended stats from ngbe_hw_stats */
1515         if (id < NGBE_NB_HW_STATS) {
1516                 snprintf(name, size, "[hw]%s",
1517                         rte_ngbe_stats_strings[id].name);
1518                 return 0;
1519         }
1520         id -= NGBE_NB_HW_STATS;
1521
1522         /* Queue Stats */
1523         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1524                 nb = id / NGBE_NB_QP_STATS;
1525                 st = id % NGBE_NB_QP_STATS;
1526                 snprintf(name, size, "[q%u]%s", nb,
1527                         rte_ngbe_qp_strings[st].name);
1528                 return 0;
1529         }
1530         id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1531
1532         return -(int)(id + 1);
1533 }
1534
1535 static inline int
1536 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1537 {
1538         int nb, st;
1539
1540         /* Extended stats from ngbe_hw_stats */
1541         if (id < NGBE_NB_HW_STATS) {
1542                 *offset = rte_ngbe_stats_strings[id].offset;
1543                 return 0;
1544         }
1545         id -= NGBE_NB_HW_STATS;
1546
1547         /* Queue Stats */
1548         if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1549                 nb = id / NGBE_NB_QP_STATS;
1550                 st = id % NGBE_NB_QP_STATS;
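                /* Per-queue counters are stored as NGBE_NB_QP_STATS
                 * consecutive uint64_t fields per queue, so queue nb
                 * starts nb * NGBE_NB_QP_STATS fields into the array.
                 */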
1551                 *offset = rte_ngbe_qp_strings[st].offset +
1552                         nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1553                 return 0;
1554         }
1555
1556         return -1;
1557 }
1558
1559 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1560         struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1561 {
1562         unsigned int i, count;
1563
1564         count = ngbe_xstats_calc_num(dev);
1565         if (xstats_names == NULL)
1566                 return count;
1567
1568         /* Note: limit >= count is checked upstream
1569          * in rte_eth_xstats_get_names()
1570          */
1571         limit = min(limit, count);
1572
1573         /* Extended stats from ngbe_hw_stats */
1574         for (i = 0; i < limit; i++) {
1575                 if (ngbe_get_name_by_id(i, xstats_names[i].name,
1576                         sizeof(xstats_names[i].name))) {
1577                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1578                         break;
1579                 }
1580         }
1581
1582         return i;
1583 }
1584
1585 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1586         const uint64_t *ids,
1587         struct rte_eth_xstat_name *xstats_names,
1588         unsigned int limit)
1589 {
1590         unsigned int i;
1591
1592         if (ids == NULL)
1593                 return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1594
1595         for (i = 0; i < limit; i++) {
1596                 if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1597                                 sizeof(xstats_names[i].name))) {
1598                         PMD_INIT_LOG(WARNING, "id value %" PRIu64 " isn't valid", ids[i]);
1599                         return -1;
1600                 }
1601         }
1602
1603         return i;
1604 }
1605
1606 static int
1607 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1608                                          unsigned int limit)
1609 {
1610         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1611         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1612         unsigned int i, count;
1613
1614         ngbe_read_stats_registers(hw, hw_stats);
1615
1616         /* If this is a reset, xstats is NULL and we have already
1617          * cleared the registers by reading them.
1618          */
1619         count = ngbe_xstats_calc_num(dev);
1620         if (xstats == NULL)
1621                 return count;
1622
1623         limit = min(limit, ngbe_xstats_calc_num(dev));
1624
1625         /* Extended stats from ngbe_hw_stats */
1626         for (i = 0; i < limit; i++) {
1627                 uint32_t offset = 0;
1628
1629                 if (ngbe_get_offset_by_id(i, &offset)) {
1630                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1631                         break;
1632                 }
1633                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1634                 xstats[i].id = i;
1635         }
1636
1637         return i;
1638 }
1639
1640 static int
1641 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1642                                          unsigned int limit)
1643 {
1644         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1645         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1646         unsigned int i, count;
1647
1648         ngbe_read_stats_registers(hw, hw_stats);
1649
1650         /* If this is a reset, xstats is NULL and we have already
1651          * cleared the registers by reading them.
1652          */
1653         count = ngbe_xstats_calc_num(dev);
1654         if (values == NULL)
1655                 return count;
1656
1657         limit = min(limit, ngbe_xstats_calc_num(dev));
1658
1659         /* Extended stats from ngbe_hw_stats */
1660         for (i = 0; i < limit; i++) {
1661                 uint32_t offset;
1662
1663                 if (ngbe_get_offset_by_id(i, &offset)) {
1664                         PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1665                         break;
1666                 }
1667                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1668         }
1669
1670         return i;
1671 }
1672
1673 static int
1674 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1675                 uint64_t *values, unsigned int limit)
1676 {
1677         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1678         unsigned int i;
1679
1680         if (ids == NULL)
1681                 return ngbe_dev_xstats_get_(dev, values, limit);
1682
1683         for (i = 0; i < limit; i++) {
1684                 uint32_t offset;
1685
1686                 if (ngbe_get_offset_by_id(ids[i], &offset)) {
1687                         PMD_INIT_LOG(WARNING, "id value %" PRIu64 " isn't valid", ids[i]);
1688                         break;
1689                 }
1690                 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1691         }
1692
1693         return i;
1694 }
1695
1696 static int
1697 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1698 {
1699         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1700         struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1701
1702         /* HW registers are cleared on read */
1703         hw->offset_loaded = 0;
1704         ngbe_read_stats_registers(hw, hw_stats);
1705         hw->offset_loaded = 1;
1706
1707         /* Reset software totals */
1708         memset(hw_stats, 0, sizeof(*hw_stats));
1709
1710         return 0;
1711 }
1712
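/* Report the firmware version as the EEPROM id. Following the ethdev
 * convention, when fw_size is too small the required length (including
 * the trailing '\0') is returned instead of 0.
 */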
1713 static int
1714 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1715 {
1716         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1717         int ret;
1718
1719         ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1720
1721         if (ret < 0)
1722                 return -EINVAL;
1723
1724         ret += 1; /* add the size of '\0' */
1725         if (fw_size < (size_t)ret)
1726                 return ret;
1727
1728         return 0;
1729 }
1730
1731 static int
1732 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1733 {
1734         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1735         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1736
1737         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1738         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1739         dev_info->min_rx_bufsize = 1024;
1740         dev_info->max_rx_pktlen = 15872;
1741         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1742         dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1743         dev_info->max_vfs = pci_dev->max_vfs;
1744         dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1745         dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1746                                      dev_info->rx_queue_offload_capa);
1747         dev_info->tx_queue_offload_capa = 0;
1748         dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1749
1750         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1751                 .rx_thresh = {
1752                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1753                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1754                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1755                 },
1756                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1757                 .rx_drop_en = 0,
1758                 .offloads = 0,
1759         };
1760
1761         dev_info->default_txconf = (struct rte_eth_txconf) {
1762                 .tx_thresh = {
1763                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1764                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1765                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1766                 },
1767                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1768                 .offloads = 0,
1769         };
1770
1771         dev_info->rx_desc_lim = rx_desc_lim;
1772         dev_info->tx_desc_lim = tx_desc_lim;
1773
1774         dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1775         dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1776         dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1777
1778         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1779                                 RTE_ETH_LINK_SPEED_10M;
1780
1781         /* Driver-preferred Rx/Tx parameters */
1782         dev_info->default_rxportconf.burst_size = 32;
1783         dev_info->default_txportconf.burst_size = 32;
1784         dev_info->default_rxportconf.nb_queues = 1;
1785         dev_info->default_txportconf.nb_queues = 1;
1786         dev_info->default_rxportconf.ring_size = 256;
1787         dev_info->default_txportconf.ring_size = 256;
1788
1789         return 0;
1790 }
1791
1792 const uint32_t *
1793 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1794 {
1795         if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1796             dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1797             dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1798             dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1799                 return ngbe_get_supported_ptypes();
1800
1801         return NULL;
1802 }
1803
1804 /* return 0 means link status changed, -1 means not changed */
1805 int
1806 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1807                             int wait_to_complete)
1808 {
1809         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1810         struct rte_eth_link link;
1811         u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1812         u32 lan_speed = 0;
1813         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1814         bool link_up;
1815         int err;
1816         int wait = 1;
1817
1818         memset(&link, 0, sizeof(link));
1819         link.link_status = RTE_ETH_LINK_DOWN;
1820         link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1821         link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1822         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1823                         ~RTE_ETH_LINK_SPEED_AUTONEG);
1824
1825         hw->mac.get_link_status = true;
1826
1827         if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1828                 return rte_eth_linkstatus_set(dev, &link);
1829
1830         /* Only wait for completion when requested and the LSC interrupt is disabled */
1831         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1832                 wait = 0;
1833
1834         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1835         if (err != 0) {
1836                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1837                 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1838                 return rte_eth_linkstatus_set(dev, &link);
1839         }
1840
1841         if (!link_up)
1842                 return rte_eth_linkstatus_set(dev, &link);
1843
1844         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1845         link.link_status = RTE_ETH_LINK_UP;
1846         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1847
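        /* Map the negotiated speed to the ethdev link speed and to the
         * NGBE_LAN_SPEED field encoding (0 = 10M, 1 = 100M, 2 = 1G).
         */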
1848         switch (link_speed) {
1849         default:
1850         case NGBE_LINK_SPEED_UNKNOWN:
1851                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1852                 break;
1853
1854         case NGBE_LINK_SPEED_10M_FULL:
1855                 link.link_speed = RTE_ETH_SPEED_NUM_10M;
1856                 lan_speed = 0;
1857                 break;
1858
1859         case NGBE_LINK_SPEED_100M_FULL:
1860                 link.link_speed = RTE_ETH_SPEED_NUM_100M;
1861                 lan_speed = 1;
1862                 break;
1863
1864         case NGBE_LINK_SPEED_1GB_FULL:
1865                 link.link_speed = RTE_ETH_SPEED_NUM_1G;
1866                 lan_speed = 2;
1867                 break;
1868         }
1869
1870         if (hw->is_pf) {
1871                 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1872                 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1873                                 NGBE_LINK_SPEED_100M_FULL |
1874                                 NGBE_LINK_SPEED_10M_FULL)) {
1875                         wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1876                                 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1877                 }
1878         }
1879
1880         return rte_eth_linkstatus_set(dev, &link);
1881 }
1882
1883 static int
1884 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1885 {
1886         return ngbe_dev_link_update_share(dev, wait_to_complete);
1887 }
1888
1889 static int
1890 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1891 {
1892         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1893         uint32_t fctrl;
1894
1895         fctrl = rd32(hw, NGBE_PSRCTL);
1896         fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1897         wr32(hw, NGBE_PSRCTL, fctrl);
1898
1899         return 0;
1900 }
1901
1902 static int
1903 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1904 {
1905         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1906         uint32_t fctrl;
1907
1908         fctrl = rd32(hw, NGBE_PSRCTL);
1909         fctrl &= (~NGBE_PSRCTL_UCP);
1910         if (dev->data->all_multicast == 1)
1911                 fctrl |= NGBE_PSRCTL_MCP;
1912         else
1913                 fctrl &= (~NGBE_PSRCTL_MCP);
1914         wr32(hw, NGBE_PSRCTL, fctrl);
1915
1916         return 0;
1917 }
1918
1919 static int
1920 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1921 {
1922         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1923         uint32_t fctrl;
1924
1925         fctrl = rd32(hw, NGBE_PSRCTL);
1926         fctrl |= NGBE_PSRCTL_MCP;
1927         wr32(hw, NGBE_PSRCTL, fctrl);
1928
1929         return 0;
1930 }
1931
1932 static int
1933 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1934 {
1935         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1936         uint32_t fctrl;
1937
1938         if (dev->data->promiscuous == 1)
1939                 return 0; /* must remain in all_multicast mode */
1940
1941         fctrl = rd32(hw, NGBE_PSRCTL);
1942         fctrl &= (~NGBE_PSRCTL_MCP);
1943         wr32(hw, NGBE_PSRCTL, fctrl);
1944
1945         return 0;
1946 }
1947
1948 /**
1949  * It clears the interrupt causes and enables/disables the interrupt.
1950  * It will be called only once during NIC initialization.
1951  *
1952  * @param dev
1953  *  Pointer to struct rte_eth_dev.
1954  * @param on
1955  *  Enable or Disable.
1956  *
1957  * @return
1958  *  - On success, zero.
1959  *  - On failure, a negative value.
1960  */
1961 static int
1962 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1963 {
1964         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1965
1966         ngbe_dev_link_status_print(dev);
1967         if (on != 0) {
1968                 intr->mask_misc |= NGBE_ICRMISC_PHY;
1969                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1970         } else {
1971                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1972                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1973         }
1974
1975         return 0;
1976 }
1977
1978 /**
1979  * It clears the interrupt causes and enables the interrupt.
1980  * It will be called only once during NIC initialization.
1981  *
1982  * @param dev
1983  *  Pointer to struct rte_eth_dev.
1984  *
1985  * @return
1986  *  - On success, zero.
1987  *  - On failure, a negative value.
1988  */
1989 static int
1990 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1991 {
1992         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1993         u64 mask;
1994
1995         mask = NGBE_ICR_MASK;
1996         mask &= (1ULL << NGBE_MISC_VEC_ID);
1997         intr->mask |= mask;
1998         intr->mask_misc |= NGBE_ICRMISC_GPIO;
1999
2000         return 0;
2001 }
2002
2003 /**
2004  * It clears the interrupt causes and enables the interrupt.
2005  * It will be called only once during NIC initialization.
2006  *
2007  * @param dev
2008  *  Pointer to struct rte_eth_dev.
2009  *
2010  * @return
2011  *  - On success, zero.
2012  *  - On failure, a negative value.
2013  */
2014 static int
2015 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2016 {
2017         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2018         u64 mask;
2019
2020         mask = NGBE_ICR_MASK;
2021         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2022         intr->mask |= mask;
2023
2024         return 0;
2025 }
2026
2027 /**
2028  * It clears the interrupt causes and enables the interrupt.
2029  * It will be called only once during NIC initialization.
2030  *
2031  * @param dev
2032  *  Pointer to struct rte_eth_dev.
2033  *
2034  * @return
2035  *  - On success, zero.
2036  *  - On failure, a negative value.
2037  */
2038 static int
2039 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2040 {
2041         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2042
2043         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2044
2045         return 0;
2046 }
2047
2048 /*
2049  * It reads the ICR and sets flags for link_update.
2050  *
2051  * @param dev
2052  *  Pointer to struct rte_eth_dev.
2053  *
2054  * @return
2055  *  - On success, zero.
2056  *  - On failure, a negative value.
2057  */
2058 static int
2059 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2060 {
2061         uint32_t eicr;
2062         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2063         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2064
2065         /* clear all cause mask */
2066         ngbe_disable_intr(hw);
2067
2068         /* read-on-clear NIC registers here */
2069         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2070         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2071
2072         intr->flags = 0;
2073
2074         /* set flag for async link update */
2075         if (eicr & NGBE_ICRMISC_PHY)
2076                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2077
2078         if (eicr & NGBE_ICRMISC_VFMBX)
2079                 intr->flags |= NGBE_FLAG_MAILBOX;
2080
2081         if (eicr & NGBE_ICRMISC_LNKSEC)
2082                 intr->flags |= NGBE_FLAG_MACSEC;
2083
2084         if (eicr & NGBE_ICRMISC_GPIO)
2085                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2086
2087         return 0;
2088 }
2089
2090 /**
2091  * It gets and then prints the link status.
2092  *
2093  * @param dev
2094  *  Pointer to struct rte_eth_dev.
2099  */
2100 static void
2101 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2102 {
2103         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2104         struct rte_eth_link link;
2105
2106         rte_eth_linkstatus_get(dev, &link);
2107
2108         if (link.link_status == RTE_ETH_LINK_UP) {
2109                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2110                                         (int)(dev->data->port_id),
2111                                         (unsigned int)link.link_speed,
2112                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2113                                         "full-duplex" : "half-duplex");
2114         } else {
2115                 PMD_INIT_LOG(INFO, "Port %d: Link Down",
2116                                 (int)(dev->data->port_id));
2117         }
2118         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2119                                 pci_dev->addr.domain,
2120                                 pci_dev->addr.bus,
2121                                 pci_dev->addr.devid,
2122                                 pci_dev->addr.function);
2123 }
2124
2125 /*
2126  * It executes link_update after an interrupt has occurred.
2127  *
2128  * @param dev
2129  *  Pointer to struct rte_eth_dev.
2130  *
2131  * @return
2132  *  - On success, zero.
2133  *  - On failure, a negative value.
2134  */
2135 static int
2136 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2137 {
2138         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2139         int64_t timeout;
2140
2141         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2142
2143         if (intr->flags & NGBE_FLAG_MAILBOX) {
2144                 ngbe_pf_mbx_process(dev);
2145                 intr->flags &= ~NGBE_FLAG_MAILBOX;
2146         }
2147
2148         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2149                 struct rte_eth_link link;
2150
2151                 /* Get the link status before the link update, for later prediction */
2152                 rte_eth_linkstatus_get(dev, &link);
2153
2154                 ngbe_dev_link_update(dev, 0);
2155
2156                 /* link is likely to come up */
2157                 if (link.link_status != RTE_ETH_LINK_UP)
2158                         /* handle it 1 sec later, waiting for it to stabilize */
2159                         timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
2160                 /* link is likely to go down */
2161                 else
2162                         /* handle it 4 sec later, waiting for it to stabilize */
2163                         timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
2164
2165                 ngbe_dev_link_status_print(dev);
2166                 if (rte_eal_alarm_set(timeout * 1000,
2167                                       ngbe_dev_interrupt_delayed_handler,
2168                                       (void *)dev) < 0) {
2169                         PMD_DRV_LOG(ERR, "Error setting alarm");
2170                 } else {
2171                         /* remember original mask */
2172                         intr->mask_misc_orig = intr->mask_misc;
2173                         /* only disable lsc interrupt */
2174                         intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2175
2176                         intr->mask_orig = intr->mask;
2177                         /* only disable all misc interrupts */
2178                         intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
2179                 }
2180         }
2181
2182         PMD_DRV_LOG(DEBUG, "enable intr immediately");
2183         ngbe_enable_intr(dev);
2184
2185         return 0;
2186 }
2187
2188 /**
2189  * Interrupt handler which shall be registered as an alarm callback
2190  * for delayed handling of a specific interrupt, waiting for the NIC
2191  * state to become stable. Since the ngbe interrupt state is not stable
2192  * right after the link goes down, it waits 4 seconds for the status to settle.
2193  *
2194  * @param param
2195  *  The address of parameter (struct rte_eth_dev *) registered before.
2196  */
2197 static void
2198 ngbe_dev_interrupt_delayed_handler(void *param)
2199 {
2200         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2201         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2202         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2203         uint32_t eicr;
2204
2205         ngbe_disable_intr(hw);
2206
2207         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2208         if (eicr & NGBE_ICRMISC_VFMBX)
2209                 ngbe_pf_mbx_process(dev);
2210
2211         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2212                 ngbe_dev_link_update(dev, 0);
2213                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2214                 ngbe_dev_link_status_print(dev);
2215                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2216                                               NULL);
2217         }
2218
2219         if (intr->flags & NGBE_FLAG_MACSEC) {
2220                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2221                                               NULL);
2222                 intr->flags &= ~NGBE_FLAG_MACSEC;
2223         }
2224
2225         /* restore original mask */
2226         intr->mask_misc = intr->mask_misc_orig;
2227         intr->mask_misc_orig = 0;
2228         intr->mask = intr->mask_orig;
2229         intr->mask_orig = 0;
2230
2231         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2232         ngbe_enable_intr(dev);
2233 }
2234
2235 /**
2236  * Interrupt handler triggered by the NIC for handling a
2237  * specific interrupt.
2238  *
2239  * @param param
2240  *  The address of parameter (struct rte_eth_dev *) registered before.
2241  */
2242 static void
2243 ngbe_dev_interrupt_handler(void *param)
2244 {
2245         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2246
2247         ngbe_dev_interrupt_get_status(dev);
2248         ngbe_dev_interrupt_action(dev);
2249 }
2250
2251 static int
2252 ngbe_dev_led_on(struct rte_eth_dev *dev)
2253 {
2254         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2255         return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2256 }
2257
2258 static int
2259 ngbe_dev_led_off(struct rte_eth_dev *dev)
2260 {
2261         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2262         return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2263 }
2264
2265 static int
2266 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2267 {
2268         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2269         uint32_t mflcn_reg;
2270         uint32_t fccfg_reg;
2271         int rx_pause;
2272         int tx_pause;
2273
2274         fc_conf->pause_time = hw->fc.pause_time;
2275         fc_conf->high_water = hw->fc.high_water;
2276         fc_conf->low_water = hw->fc.low_water;
2277         fc_conf->send_xon = hw->fc.send_xon;
2278         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2279
2280         /*
2281          * Return rx_pause status according to actual setting of
2282          * RXFCCFG register.
2283          */
2284         mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2285         if (mflcn_reg & NGBE_RXFCCFG_FC)
2286                 rx_pause = 1;
2287         else
2288                 rx_pause = 0;
2289
2290         /*
2291          * Return tx_pause status according to actual setting of
2292          * TXFCCFG register.
2293          */
2294         fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2295         if (fccfg_reg & NGBE_TXFCCFG_FC)
2296                 tx_pause = 1;
2297         else
2298                 tx_pause = 0;
2299
2300         if (rx_pause && tx_pause)
2301                 fc_conf->mode = RTE_ETH_FC_FULL;
2302         else if (rx_pause)
2303                 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2304         else if (tx_pause)
2305                 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2306         else
2307                 fc_conf->mode = RTE_ETH_FC_NONE;
2308
2309         return 0;
2310 }
2311
2312 static int
2313 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2314 {
2315         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2316         int err;
2317         uint32_t rx_buf_size;
2318         uint32_t max_high_water;
2319         enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2320                 ngbe_fc_none,
2321                 ngbe_fc_rx_pause,
2322                 ngbe_fc_tx_pause,
2323                 ngbe_fc_full
2324         };
2325
2326         PMD_INIT_FUNC_TRACE();
2327
2328         rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2329         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2330
2331         /*
2332          * Reserve at least one Ethernet frame for the watermark;
2333          * high_water/low_water are in kilobytes for ngbe.
2334          */
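        /* E.g. (illustrative value only) with a 64 KB Rx packet buffer,
         * max_high_water = (0x10000 - 1518) >> 10 = 62 KB.
         */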
2335         max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2336         if (fc_conf->high_water > max_high_water ||
2337             fc_conf->high_water < fc_conf->low_water) {
2338                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2339                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2340                 return -EINVAL;
2341         }
2342
2343         hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2344         hw->fc.pause_time     = fc_conf->pause_time;
2345         hw->fc.high_water     = fc_conf->high_water;
2346         hw->fc.low_water      = fc_conf->low_water;
2347         hw->fc.send_xon       = fc_conf->send_xon;
2348         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2349
2350         err = hw->mac.fc_enable(hw);
2351
2352         /* Not negotiated is not an error case */
2353         if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2354                 wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2355                       (fc_conf->mac_ctrl_frame_fwd
2356                        ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2357                 ngbe_flush(hw);
2358
2359                 return 0;
2360         }
2361
2362         PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2363         return -EIO;
2364 }
2365
2366 int
2367 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2368                           struct rte_eth_rss_reta_entry64 *reta_conf,
2369                           uint16_t reta_size)
2370 {
2371         uint8_t i, j, mask;
2372         uint32_t reta;
2373         uint16_t idx, shift;
2374         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2375         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2376
2377         PMD_INIT_FUNC_TRACE();
2378
2379         if (!hw->is_pf) {
2380                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2381                         "NIC.");
2382                 return -ENOTSUP;
2383         }
2384
2385         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2386                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2387                         "(%d) doesn't match what the hardware can support "
2388                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2389                 return -EINVAL;
2390         }
2391
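        /* Each 32-bit RSSTBL register holds four 8-bit redirection
         * entries, so the table is walked four entries at a time;
         * e.g. entries 8..11 live in RSSTBL word 8 >> 2 = 2.
         */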
2392         for (i = 0; i < reta_size; i += 4) {
2393                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2394                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2395                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2396                 if (!mask)
2397                         continue;
2398
2399                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2400                 for (j = 0; j < 4; j++) {
2401                         if (RS8(mask, j, 0x1)) {
2402                                 reta  &= ~(MS32(8 * j, 0xFF));
2403                                 reta |= LS32(reta_conf[idx].reta[shift + j],
2404                                                 8 * j, 0xFF);
2405                         }
2406                 }
2407                 wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2408         }
2409         adapter->rss_reta_updated = 1;
2410
2411         return 0;
2412 }
2413
2414 int
2415 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2416                          struct rte_eth_rss_reta_entry64 *reta_conf,
2417                          uint16_t reta_size)
2418 {
2419         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2420         uint8_t i, j, mask;
2421         uint32_t reta;
2422         uint16_t idx, shift;
2423
2424         PMD_INIT_FUNC_TRACE();
2425
2426         if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2427                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2428                         "(%d) doesn't match what the hardware can support "
2429                         "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2430                 return -EINVAL;
2431         }
2432
2433         for (i = 0; i < reta_size; i += 4) {
2434                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2435                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2436                 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2437                 if (!mask)
2438                         continue;
2439
2440                 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2441                 for (j = 0; j < 4; j++) {
2442                         if (RS8(mask, j, 0x1))
2443                                 reta_conf[idx].reta[shift + j] =
2444                                         (uint16_t)RS32(reta, 8 * j, 0xFF);
2445                 }
2446         }
2447
2448         return 0;
2449 }
2450
2451 static int
2452 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2453                                 uint32_t index, uint32_t pool)
2454 {
2455         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2456         uint32_t enable_addr = 1;
2457
2458         return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2459                              pool, enable_addr);
2460 }
2461
2462 static void
2463 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2464 {
2465         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2466
2467         ngbe_clear_rar(hw, index);
2468 }
2469
2470 static int
2471 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2472 {
2473         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2474
2475         ngbe_remove_rar(dev, 0);
2476         ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2477
2478         return 0;
2479 }
2480
2481 static int
2482 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2483 {
2484         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2485         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2486         struct rte_eth_dev_data *dev_data = dev->data;
2487
2488         /* If the device is started, refuse an MTU that requires scattered
2489          * Rx support when this feature has not been enabled before.
2490          */
2491         if (dev_data->dev_started && !dev_data->scattered_rx &&
2492             (frame_size + 2 * RTE_VLAN_HLEN >
2493              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2494                 PMD_INIT_LOG(ERR, "Stop port first.");
2495                 return -EINVAL;
2496         }
2497
2498         if (hw->mode)
2499                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2500                         NGBE_FRAME_SIZE_MAX);
2501         else
2502                 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2503                         NGBE_FRMSZ_MAX(frame_size));
2504
2505         return 0;
2506 }
2507
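/* Hash a unicast address into a 12-bit table vector using bits of the
 * two most significant address bytes, selected by mc_filter_type.
 * E.g. with filter type 0 and an address ending in ab:cd,
 * vector = (0xab >> 4) | (0xcd << 4) = 0xcda.
 */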
2508 static uint32_t
2509 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2510 {
2511         uint32_t vector = 0;
2512
2513         switch (hw->mac.mc_filter_type) {
2514         case 0:   /* use bits [47:36] of the address */
2515                 vector = ((uc_addr->addr_bytes[4] >> 4) |
2516                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2517                 break;
2518         case 1:   /* use bits [46:35] of the address */
2519                 vector = ((uc_addr->addr_bytes[4] >> 3) |
2520                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2521                 break;
2522         case 2:   /* use bits [45:34] of the address */
2523                 vector = ((uc_addr->addr_bytes[4] >> 2) |
2524                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2525                 break;
2526         case 3:   /* use bits [43:32] of the address */
2527                 vector = ((uc_addr->addr_bytes[4]) |
2528                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2529                 break;
2530         default:  /* Invalid mc_filter_type */
2531                 break;
2532         }
2533
2534         /* vector can only be 12 bits, or the table boundary will be exceeded */
2535         vector &= 0xFFF;
2536         return vector;
2537 }
2538
2539 static int
2540 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2541                         struct rte_ether_addr *mac_addr, uint8_t on)
2542 {
2543         uint32_t vector;
2544         uint32_t uta_idx;
2545         uint32_t reg_val;
2546         uint32_t uta_mask;
2547         uint32_t psrctl;
2548
2549         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2550         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2551
2552         vector = ngbe_uta_vector(hw, mac_addr);
2553         uta_idx = (vector >> 5) & 0x7F;
2554         uta_mask = 0x1UL << (vector & 0x1F);
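        /* The 12-bit vector picks one bit in the 128 x 32-bit unicast
         * hash table: bits 11:5 select the register, bits 4:0 the bit.
         */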
2555
2556         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2557                 return 0;
2558
2559         reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2560         if (on) {
2561                 uta_info->uta_in_use++;
2562                 reg_val |= uta_mask;
2563                 uta_info->uta_shadow[uta_idx] |= uta_mask;
2564         } else {
2565                 uta_info->uta_in_use--;
2566                 reg_val &= ~uta_mask;
2567                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2568         }
2569
2570         wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2571
2572         psrctl = rd32(hw, NGBE_PSRCTL);
2573         if (uta_info->uta_in_use > 0)
2574                 psrctl |= NGBE_PSRCTL_UCHFENA;
2575         else
2576                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2577
2578         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2579         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2580         wr32(hw, NGBE_PSRCTL, psrctl);
2581
2582         return 0;
2583 }
2584
2585 static int
2586 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2587 {
2588         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2589         struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2590         uint32_t psrctl;
2591         int i;
2592
2593         if (on) {
2594                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2595                         uta_info->uta_shadow[i] = ~0;
2596                         wr32(hw, NGBE_UCADDRTBL(i), ~0);
2597                 }
2598         } else {
2599                 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2600                         uta_info->uta_shadow[i] = 0;
2601                         wr32(hw, NGBE_UCADDRTBL(i), 0);
2602                 }
2603         }
2604
2605         psrctl = rd32(hw, NGBE_PSRCTL);
2606         if (on)
2607                 psrctl |= NGBE_PSRCTL_UCHFENA;
2608         else
2609                 psrctl &= ~NGBE_PSRCTL_UCHFENA;
2610
2611         psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2612         psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2613         wr32(hw, NGBE_PSRCTL, psrctl);
2614
2615         return 0;
2616 }
2617
2618 /**
2619  * Set the IVAR registers, mapping interrupt causes to vectors
2620  * @param hw
2621  *  pointer to ngbe_hw struct
2622  * @param direction
2623  *  0 for Rx, 1 for Tx, -1 for other causes
2624  * @param queue
2625  *  queue to map the corresponding interrupt to
2626  * @param msix_vector
2627  *  the vector to map to the corresponding queue
2628  */
2629 void
2630 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2631                    uint8_t queue, uint8_t msix_vector)
2632 {
2633         uint32_t tmp, idx;
2634
2635         if (direction == -1) {
2636                 /* other causes */
2637                 msix_vector |= NGBE_IVARMISC_VLD;
2638                 idx = 0;
2639                 tmp = rd32(hw, NGBE_IVARMISC);
2640                 tmp &= ~(0xFF << idx);
2641                 tmp |= (msix_vector << idx);
2642                 wr32(hw, NGBE_IVARMISC, tmp);
2643         } else {
2644                 /* rx or tx causes */
2645                 /* Workaround for ICR lost */
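                /* Each IVAR register covers two queues: byte lanes 0/1
                 * hold Rx/Tx for the even queue, lanes 2/3 for the odd
                 * one. E.g. queue 3/Tx (direction 1) lands in
                 * NGBE_IVAR(1) at bit offset 16 * 1 + 8 * 1 = 24.
                 */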
2646                 idx = ((16 * (queue & 1)) + (8 * direction));
2647                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2648                 tmp &= ~(0xFF << idx);
2649                 tmp |= (msix_vector << idx);
2650                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2651         }
2652 }
2653
2654 /**
2655  * Sets up the hardware to properly generate MSI-X interrupts
2656  * @param dev
2657  *  Pointer to struct rte_eth_dev
2658  */
2659 static void
2660 ngbe_configure_msix(struct rte_eth_dev *dev)
2661 {
2662         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2663         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2664         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2665         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2666         uint32_t vec = NGBE_MISC_VEC_ID;
2667         uint32_t gpie;
2668
2669         /*
2670          * Won't configure the MSI-X register if no mapping is done
2671          * between intr vector and event fd;
2672          * but if MSI-X has been enabled already, we still need to
2673          * configure auto clean, auto mask and throttling.
2674          */
2675         gpie = rd32(hw, NGBE_GPIE);
2676         if (!rte_intr_dp_is_en(intr_handle) &&
2677             !(gpie & NGBE_GPIE_MSIX))
2678                 return;
2679
2680         if (rte_intr_allow_others(intr_handle)) {
2681                 base = NGBE_RX_VEC_START;
2682                 vec = base;
2683         }
2684
2685         /* setup GPIE for MSI-X mode */
2686         gpie = rd32(hw, NGBE_GPIE);
2687         gpie |= NGBE_GPIE_MSIX;
2688         wr32(hw, NGBE_GPIE, gpie);
2689
2690         /* Populate the IVAR table and set the ITR values to the
2691          * corresponding register.
2692          */
2693         if (rte_intr_dp_is_en(intr_handle)) {
2694                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2695                         queue_id++) {
2696                         /* by default, 1:1 mapping */
2697                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
2698                         rte_intr_vec_list_index_set(intr_handle,
2699                                                            queue_id, vec);
2700                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
2701                             - 1)
2702                                 vec++;
2703                 }
2704
2705                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2706         }
2707         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2708                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2709                         | NGBE_ITR_WRDSA);
2710 }
2711
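/* Iterator callback for update_mc_addr_list(): returns the current
 * multicast address and advances the cursor; the VMDq pool output is
 * always 0 here.
 */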
2712 static u8 *
2713 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2714                         u8 **mc_addr_ptr, u32 *vmdq)
2715 {
2716         u8 *mc_addr;
2717
2718         *vmdq = 0;
2719         mc_addr = *mc_addr_ptr;
2720         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2721         return mc_addr;
2722 }
2723
2724 int
2725 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2726                           struct rte_ether_addr *mc_addr_set,
2727                           uint32_t nb_mc_addr)
2728 {
2729         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2730         u8 *mc_addr_list;
2731
2732         mc_addr_list = (u8 *)mc_addr_set;
2733         return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2734                                          ngbe_dev_addr_list_itr, TRUE);
2735 }
2736
2737 static uint64_t
2738 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2739 {
2740         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2741         uint64_t systime_cycles;
2742
2743         systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2744         systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2745
2746         return systime_cycles;
2747 }
2748
2749 static uint64_t
2750 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2751 {
2752         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2753         uint64_t rx_tstamp_cycles;
2754
2755         /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2756         rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2757         rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2758
2759         return rx_tstamp_cycles;
2760 }
2761
2762 static uint64_t
2763 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2764 {
2765         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2766         uint64_t tx_tstamp_cycles;
2767
2768         /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2769         tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2770         tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2771
2772         return tx_tstamp_cycles;
2773 }
2774
2775 static void
2776 ngbe_start_timecounters(struct rte_eth_dev *dev)
2777 {
2778         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2779         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2780         uint32_t incval = 0;
2781         uint32_t shift = 0;
2782
2783         incval = NGBE_INCVAL_1GB;
2784         shift = NGBE_INCVAL_SHIFT_1GB;
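        /* A single 1G increment value/shift pair is used regardless of
         * the negotiated link speed, as implied by the fixed assignment
         * above (the time base does not change with speed here).
         */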
2785
2786         wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2787
2788         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2789         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2790         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2791
2792         adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2793         adapter->systime_tc.cc_shift = shift;
2794         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2795
2796         adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2797         adapter->rx_tstamp_tc.cc_shift = shift;
2798         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2799
2800         adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2801         adapter->tx_tstamp_tc.cc_shift = shift;
2802         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2803 }
2804
2805 static int
2806 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2807 {
2808         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2809
2810         adapter->systime_tc.nsec += delta;
2811         adapter->rx_tstamp_tc.nsec += delta;
2812         adapter->tx_tstamp_tc.nsec += delta;
2813
2814         return 0;
2815 }
2816
2817 static int
2818 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2819 {
2820         uint64_t ns;
2821         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2822
2823         ns = rte_timespec_to_ns(ts);
2824         /* Set the timecounters to a new value. */
2825         adapter->systime_tc.nsec = ns;
2826         adapter->rx_tstamp_tc.nsec = ns;
2827         adapter->tx_tstamp_tc.nsec = ns;
2828
2829         return 0;
2830 }
2831
2832 static int
2833 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2834 {
2835         uint64_t ns, systime_cycles;
2836         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2837
2838         systime_cycles = ngbe_read_systime_cyclecounter(dev);
2839         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2840         *ts = rte_ns_to_timespec(ns);
2841
2842         return 0;
2843 }
2844
2845 static int
2846 ngbe_timesync_enable(struct rte_eth_dev *dev)
2847 {
2848         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2849         uint32_t tsync_ctl;
2850
2851         /* Stop the timesync system time. */
2852         wr32(hw, NGBE_TSTIMEINC, 0x0);
2853         /* Reset the timesync system time value. */
2854         wr32(hw, NGBE_TSTIMEL, 0x0);
2855         wr32(hw, NGBE_TSTIMEH, 0x0);
2856
2857         ngbe_start_timecounters(dev);
2858
2859         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2860         wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2861                 RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2862
2863         /* Enable timestamping of received PTP packets. */
2864         tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2865         tsync_ctl |= NGBE_TSRXCTL_ENA;
2866         wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2867
2868         /* Enable timestamping of transmitted PTP packets. */
2869         tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2870         tsync_ctl |= NGBE_TSTXCTL_ENA;
2871         wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2872
2873         ngbe_flush(hw);
2874
2875         return 0;
2876 }
2877
2878 static int
2879 ngbe_timesync_disable(struct rte_eth_dev *dev)
2880 {
2881         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2882         uint32_t tsync_ctl;
2883
2884         /* Disable timestamping of transmitted PTP packets. */
2885         tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2886         tsync_ctl &= ~NGBE_TSTXCTL_ENA;
2887         wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2888
2889         /* Disable timestamping of received PTP packets. */
2890         tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2891         tsync_ctl &= ~NGBE_TSRXCTL_ENA;
2892         wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2893
2894         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2895         wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
2896
2897         /* Stop incrementing the System Time registers. */
2898         wr32(hw, NGBE_TSTIMEINC, 0);
2899
2900         return 0;
2901 }
2902
2903 static int
2904 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2905                                  struct timespec *timestamp,
2906                                  uint32_t flags __rte_unused)
2907 {
2908         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2909         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2910         uint32_t tsync_rxctl;
2911         uint64_t rx_tstamp_cycles;
2912         uint64_t ns;
2913
2914         tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
2915         if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
2916                 return -EINVAL;
2917
2918         rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
2919         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
2920         *timestamp = rte_ns_to_timespec(ns);
2921
2922         return  0;
2923 }
2924
2925 static int
2926 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2927                                  struct timespec *timestamp)
2928 {
2929         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2930         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2931         uint32_t tsync_txctl;
2932         uint64_t tx_tstamp_cycles;
2933         uint64_t ns;
2934
2935         tsync_txctl = rd32(hw, NGBE_TSTXCTL);
2936         if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
2937                 return -EINVAL;
2938
2939         tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
2940         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
2941         *timestamp = rte_ns_to_timespec(ns);
2942
2943         return 0;
2944 }
2945
2946 static int
2947 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2948 {
2949         int count = 0;
2950         int g_ind = 0;
2951         const struct reg_info *reg_group;
2952         const struct reg_info **reg_set = ngbe_regs_others;
2953
2954         while ((reg_group = reg_set[g_ind++]))
2955                 count += ngbe_regs_group_count(reg_group);
2956
2957         return count;
2958 }
2959
2960 static int
2961 ngbe_get_regs(struct rte_eth_dev *dev,
2962               struct rte_dev_reg_info *regs)
2963 {
2964         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2965         uint32_t *data = regs->data;
2966         int g_ind = 0;
2967         int count = 0;
2968         const struct reg_info *reg_group;
2969         const struct reg_info **reg_set = ngbe_regs_others;
2970
2971         if (data == NULL) {
2972                 regs->length = ngbe_get_reg_length(dev);
2973                 regs->width = sizeof(uint32_t);
2974                 return 0;
2975         }
2976
2977         /* Support only full register dump */
2978         if (regs->length == 0 ||
2979             regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
2980                 regs->version = hw->mac.type << 24 |
2981                                 hw->revision_id << 16 |
2982                                 hw->device_id;
2983                 while ((reg_group = reg_set[g_ind++]))
2984                         count += ngbe_read_regs_group(dev, &data[count],
2985                                                       reg_group);
2986                 return 0;
2987         }
2988
2989         return -ENOTSUP;
2990 }
2991
2992 static int
2993 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
2994 {
2995         struct ngbe_hw *hw = ngbe_dev_hw(dev);
2996
2997         /* Return unit is bytes; the ROM size is kept in 16-bit words */
2998         return hw->rom.word_size * 2;
2999 }
3000
3001 static int
3002 ngbe_get_eeprom(struct rte_eth_dev *dev,
3003                 struct rte_dev_eeprom_info *in_eeprom)
3004 {
3005         struct ngbe_hw *hw = ngbe_dev_hw(dev);
3006         struct ngbe_rom_info *eeprom = &hw->rom;
3007         uint16_t *data = in_eeprom->data;
3008         int first, length;
3009
3010         first = in_eeprom->offset >> 1;
3011         length = in_eeprom->length >> 1;
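        /* The EEPROM is accessed in 16-bit words, so the byte offset
         * and length from the request are converted to word units.
         */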
3012         if (first > hw->rom.word_size ||
3013             ((first + length) > hw->rom.word_size))
3014                 return -EINVAL;
3015
3016         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3017
3018         return eeprom->readw_buffer(hw, first, length, data);
3019 }
3020
static int
ngbe_set_eeprom(struct rte_eth_dev *dev,
                struct rte_dev_eeprom_info *in_eeprom)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_rom_info *eeprom = &hw->rom;
        uint16_t *data = in_eeprom->data;
        int first, length;

        first = in_eeprom->offset >> 1;
        length = in_eeprom->length >> 1;
        if (first > hw->rom.word_size ||
            ((first + length) > hw->rom.word_size))
                return -EINVAL;

        in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

        return eeprom->writew_buffer(hw, first, length, data);
}

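/* PF ethdev callbacks registered with the ethdev layer */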
static const struct eth_dev_ops ngbe_eth_dev_ops = {
        .dev_configure              = ngbe_dev_configure,
        .dev_infos_get              = ngbe_dev_info_get,
        .dev_start                  = ngbe_dev_start,
        .dev_stop                   = ngbe_dev_stop,
        .dev_close                  = ngbe_dev_close,
        .dev_reset                  = ngbe_dev_reset,
        .promiscuous_enable         = ngbe_dev_promiscuous_enable,
        .promiscuous_disable        = ngbe_dev_promiscuous_disable,
        .allmulticast_enable        = ngbe_dev_allmulticast_enable,
        .allmulticast_disable       = ngbe_dev_allmulticast_disable,
        .link_update                = ngbe_dev_link_update,
        .stats_get                  = ngbe_dev_stats_get,
        .xstats_get                 = ngbe_dev_xstats_get,
        .xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
        .stats_reset                = ngbe_dev_stats_reset,
        .xstats_reset               = ngbe_dev_xstats_reset,
        .xstats_get_names           = ngbe_dev_xstats_get_names,
        .xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
        .fw_version_get             = ngbe_fw_version_get,
        .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
        .mtu_set                    = ngbe_dev_mtu_set,
        .vlan_filter_set            = ngbe_vlan_filter_set,
        .vlan_tpid_set              = ngbe_vlan_tpid_set,
        .vlan_offload_set           = ngbe_vlan_offload_set,
        .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
        .rx_queue_start             = ngbe_dev_rx_queue_start,
        .rx_queue_stop              = ngbe_dev_rx_queue_stop,
        .tx_queue_start             = ngbe_dev_tx_queue_start,
        .tx_queue_stop              = ngbe_dev_tx_queue_stop,
        .rx_queue_setup             = ngbe_dev_rx_queue_setup,
        .rx_queue_release           = ngbe_dev_rx_queue_release,
        .tx_queue_setup             = ngbe_dev_tx_queue_setup,
        .tx_queue_release           = ngbe_dev_tx_queue_release,
        .dev_led_on                 = ngbe_dev_led_on,
        .dev_led_off                = ngbe_dev_led_off,
        .flow_ctrl_get              = ngbe_flow_ctrl_get,
        .flow_ctrl_set              = ngbe_flow_ctrl_set,
        .mac_addr_add               = ngbe_add_rar,
        .mac_addr_remove            = ngbe_remove_rar,
        .mac_addr_set               = ngbe_set_default_mac_addr,
        .uc_hash_table_set          = ngbe_uc_hash_table_set,
        .uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
        .reta_update                = ngbe_dev_rss_reta_update,
        .reta_query                 = ngbe_dev_rss_reta_query,
        .rss_hash_update            = ngbe_dev_rss_hash_update,
        .rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
        .set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
        .rxq_info_get               = ngbe_rxq_info_get,
        .txq_info_get               = ngbe_txq_info_get,
        .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
        .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
        .timesync_enable            = ngbe_timesync_enable,
        .timesync_disable           = ngbe_timesync_disable,
        .timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
        .get_reg                    = ngbe_get_regs,
        .get_eeprom_length          = ngbe_get_eeprom_length,
        .get_eeprom                 = ngbe_get_eeprom,
        .set_eeprom                 = ngbe_set_eeprom,
        .timesync_adjust_time       = ngbe_timesync_adjust_time,
        .timesync_read_time         = ngbe_timesync_read_time,
        .timesync_write_time        = ngbe_timesync_write_time,
        .tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
};

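/* Register the PMD with the PCI bus and declare its kernel module deps */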
RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);

#ifdef RTE_ETHDEV_DEBUG_RX
        RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
        RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
#endif