net/ngbe: support basic statistics
[dpdk.git] / drivers / net / ngbe / ngbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

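/*
 * Per-queue VLAN strip state is tracked in a bitmap: queue 'q' maps to
 * word q / 32 and bit q % 32 of hwstrip->bitmap (each bitmap word is a
 * uint32_t and NBBY is the number of bits per byte).
 */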
#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

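/*
 * Re-enable interrupts. IENMISC selects the misc interrupt sources; the
 * IMC(0) write is assumed to act as an interrupt-mask-clear, unmasking
 * the vectors in intr->mask. ngbe_disable_intr() below does the opposite
 * by setting all bits in IMS(0).
 */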
static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These locks are trickier since they are common to all ports; but
	 * swfw_sync retries for long enough (1s) that, if the lock cannot
	 * be taken, it is almost certainly due to an improperly held
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

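/*
 * Device init: map the BAR, initialize the shared (base driver) code,
 * validate the EEPROM, set up MAC address storage and hook up the PCI
 * interrupt handler.
 */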
static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check that we don't need
	 * different Rx and Tx functions.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* The Tx function used in the primary process is set by the
		 * last queue initialized; Tx queues may not have been
		 * initialized by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable supported interrupts */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

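	/* The VFTA holds 128 32-bit words: bits 11:5 of the VLAN ID select
	 * the word, bits 4:0 select the bit within it.
	 */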
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			!(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions, this flag will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and set up link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	ngbe_stop_hw(hw);

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	hw->phy.init_hw(hw);
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't be enabled because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* GPIO0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before returning, because link
	 * configuration may continue in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}


/*
 * Stop device: disable Rx and Tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* GPIO0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}


/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

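	/* The handler may still be executing; keep retrying while
	 * unregister returns -EAGAIN, up to (10 + NGBE_LINK_UP_TIME)
	 * attempts with a 100 ms delay between them.
	 */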
	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}


/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

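/*
 * The per-queue hardware counters are free-running and wrap: 32-bit
 * registers, or 36-bit values split across LSB/MSB registers. These
 * macros report the value relative to the recorded baseline (qp_last),
 * adding one wrap period when the current reading is below the baseline;
 * while hw->offset_loaded is 0, the current reading becomes the new
 * baseline instead.
 */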
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
	{                                                       \
		uint32_t current_counter = rd32(hw, reg);       \
		if (current_counter < last_counter)             \
			current_counter += 0x100000000LL;       \
		if (!hw->offset_loaded)                         \
			last_counter = current_counter;         \
		counter = current_counter - last_counter;       \
		counter &= 0xFFFFFFFFLL;                        \
	}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{                                                                \
		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
		uint64_t current_counter = (current_counter_msb << 32) | \
			current_counter_lsb;                             \
		if (current_counter < last_counter)                      \
			current_counter += 0x1000000000LL;               \
		if (!hw->offset_loaded)                                  \
			last_counter = current_counter;                  \
		counter = current_counter - last_counter;                \
		counter &= 0xFFFFFFFFFLL;                                \
	}

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			   struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
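	/* Each 32-bit RQSM/TQSM register packs NB_QMAP_FIELDS_PER_QSM_REG
	 * 8-bit map fields; the field for queue i selects which of the
	 * RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue counters that queue's
	 * statistics are accumulated into.
	 */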
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed  = hw_stats->rx_total_missed_packets +
			  hw_stats->rx_dma_drop;
	stats->ierrors  = hw_stats->rx_crc_errors +
			  hw_stats->rx_mac_short_packet_dropped +
			  hw_stats->rx_length_errors +
			  hw_stats->rx_undersize_errors +
			  hw_stats->rx_oversize_errors +
			  hw_stats->rx_illegal_byte_errors +
			  hw_stats->rx_error_bytes +
			  hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors  = 0;
	return 0;
}


static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
				RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

const uint32_t *
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes();

	return NULL;
}

/* Return 0 if the link status changed, -1 if it did not */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	bool link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
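	/* report autonegotiation unless a fixed speed was requested
	 * in link_speeds
	 */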
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			~RTE_ETH_LINK_SPEED_AUTONEG);

	hw->mac.get_link_status = true;

	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* don't wait to complete when polling, or when the LSC interrupt
	 * is enabled
	 */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (!link_up)
		return rte_eth_linkstatus_set(dev, &link);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case NGBE_LINK_SPEED_UNKNOWN:
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		break;

	case NGBE_LINK_SPEED_10M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
		lan_speed = 0;
		break;

	case NGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		lan_speed = 1;
		break;

	case NGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		lan_speed = 2;
		break;
	}

	if (hw->is_pf) {
		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
				NGBE_LINK_SPEED_100M_FULL |
				NGBE_LINK_SPEED_10M_FULL)) {
			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
		}
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return ngbe_dev_link_update_share(dev, wait_to_complete);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	ngbe_dev_link_status_print(dev);
	if (on != 0) {
		intr->mask_misc |= NGBE_ICRMISC_PHY;
		intr->mask_misc |= NGBE_ICRMISC_GPIO;
	} else {
		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
	}

	return 0;
}

1466
1467 /**
1468  * It enables the interrupt mask bit for the misc vector and the GPIO cause.
1469  * It will be called only once during NIC initialization.
1470  *
1471  * @param dev
1472  *  Pointer to struct rte_eth_dev.
1473  *
1474  * @return
1475  *  - On success, zero.
1476  *  - On failure, a negative value.
1477  */
1478 static int
1479 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1480 {
1481         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1482         u64 mask;
1483
1484         mask = NGBE_ICR_MASK;
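             /* keep only the cause bit routed to the misc vector */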
1485         mask &= (1ULL << NGBE_MISC_VEC_ID);
1486         intr->mask |= mask;
1487         intr->mask_misc |= NGBE_ICRMISC_GPIO;
1488
1489         return 0;
1490 }
1491
1492 /**
1493  * It enables the interrupt mask bits for all Rx queue vectors.
1494  * It will be called only once during NIC initialization.
1495  *
1496  * @param dev
1497  *  Pointer to struct rte_eth_dev.
1498  *
1499  * @return
1500  *  - On success, zero.
1501  *  - On failure, a negative value.
1502  */
1503 static int
1504 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1505 {
1506         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1507         u64 mask;
1508
1509         mask = NGBE_ICR_MASK;
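             /* keep only the cause bits of the queue vectors (>= NGBE_RX_VEC_START) */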
1510         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
1511         intr->mask |= mask;
1512
1513         return 0;
1514 }
1515
1516 /**
1517  * It enables the MACsec (link security) interrupt cause in the misc mask.
1518  * It will be called only once during NIC initialization.
1519  *
1520  * @param dev
1521  *  Pointer to struct rte_eth_dev.
1522  *
1523  * @return
1524  *  - On success, zero.
1525  *  - On failure, a negative value.
1526  */
1527 static int
1528 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1529 {
1530         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1531
1532         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
1533
1534         return 0;
1535 }
1536
1537 /*
1538  * It reads the ICR and sets flags for the link_update and other deferred handling.
1539  *
1540  * @param dev
1541  *  Pointer to struct rte_eth_dev.
1542  *
1543  * @return
1544  *  - On success, zero.
1545  *  - On failure, a negative value.
1546  */
1547 static int
1548 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1549 {
1550         uint32_t eicr;
1551         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1552         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1553
1554         /* mask all interrupt causes */
1555         ngbe_disable_intr(hw);
1556
1557         /* read-on-clear nic registers here */
1558         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1559         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1560
1561         intr->flags = 0;
1562
1563         /* set flag for async link update */
1564         if (eicr & NGBE_ICRMISC_PHY)
1565                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1566
1567         if (eicr & NGBE_ICRMISC_VFMBX)
1568                 intr->flags |= NGBE_FLAG_MAILBOX;
1569
1570         if (eicr & NGBE_ICRMISC_LNKSEC)
1571                 intr->flags |= NGBE_FLAG_MACSEC;
1572
1573         if (eicr & NGBE_ICRMISC_GPIO)
1574                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
1575
1576         return 0;
1577 }
1578
1579 /**
1580  * It gets and prints the current link status.
1581  *
1582  * @param dev
1583  *  Pointer to struct rte_eth_dev.
1588  */
1589 static void
1590 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
1591 {
1592         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1593         struct rte_eth_link link;
1594
1595         rte_eth_linkstatus_get(dev, &link);
1596
1597         if (link.link_status == RTE_ETH_LINK_UP) {
1598                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1599                                         (int)(dev->data->port_id),
1600                                         (unsigned int)link.link_speed,
1601                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1602                                         "full-duplex" : "half-duplex");
1603         } else {
1604                 PMD_INIT_LOG(INFO, "Port %d: Link Down",
1605                                 (int)(dev->data->port_id));
1606         }
1607         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1608                                 pci_dev->addr.domain,
1609                                 pci_dev->addr.bus,
1610                                 pci_dev->addr.devid,
1611                                 pci_dev->addr.function);
1612 }
1613
1614 /*
1615  * It executes link_update after an interrupt has occurred.
1616  *
1617  * @param dev
1618  *  Pointer to struct rte_eth_dev.
1619  *
1620  * @return
1621  *  - On success, zero.
1622  *  - On failure, a negative value.
1623  */
1624 static int
1625 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
1626 {
1627         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1628         int64_t timeout;
1629
1630         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
1631
1632         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
1633                 struct rte_eth_link link;
1634
1635                 /* get the link status before the update, to predict the new state */
1636                 rte_eth_linkstatus_get(dev, &link);
1637
1638                 ngbe_dev_link_update(dev, 0);
1639
1640                 /* link was down: it is likely coming up */
1641                 if (link.link_status != RTE_ETH_LINK_UP)
1642                         /* handle it 1 sec later, waiting for it to stabilize */
1643                         timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
1644                 /* link was up: it is likely going down */
1645                 else
1646                         /* handle it 4 sec later, waiting for it to stabilize */
1647                         timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
1648
1649                 ngbe_dev_link_status_print(dev);
1650                 if (rte_eal_alarm_set(timeout * 1000,
1651                                       ngbe_dev_interrupt_delayed_handler,
1652                                       (void *)dev) < 0) {
1653                         PMD_DRV_LOG(ERR, "Error setting alarm");
1654                 } else {
1655                         /* remember original mask */
1656                         intr->mask_misc_orig = intr->mask_misc;
1657                         /* only disable lsc interrupt */
1658                         intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1659
1660                         intr->mask_orig = intr->mask;
1661                         /* disable only the misc vector, which carries all misc causes */
1662                         intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
1663                 }
1664         }
1665
1666         PMD_DRV_LOG(DEBUG, "enable intr immediately");
1667         ngbe_enable_intr(dev);
1668
1669         return 0;
1670 }
1671
1672 /**
1673  * Interrupt handler registered as an alarm callback to handle specific
1674  * interrupts after a delay, waiting for the NIC state to stabilize. Since
1675  * the ngbe interrupt state is not stable right after the link goes down,
1676  * it needs to wait 4 seconds before reading the stable status.
1677  *
1678  * @param param
1679  *  The address of parameter (struct rte_eth_dev *) registered before.
1680  */
1681 static void
1682 ngbe_dev_interrupt_delayed_handler(void *param)
1683 {
1684         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1685         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1686         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1687         uint32_t eicr;
1688
1689         ngbe_disable_intr(hw);
1690
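             /* read the misc causes latched in the interrupt status block */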
1691         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
1692
1693         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
1694                 ngbe_dev_link_update(dev, 0);
1695                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
1696                 ngbe_dev_link_status_print(dev);
1697                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
1698                                               NULL);
1699         }
1700
1701         if (intr->flags & NGBE_FLAG_MACSEC) {
1702                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
1703                                               NULL);
1704                 intr->flags &= ~NGBE_FLAG_MACSEC;
1705         }
1706
1707         /* restore original mask */
1708         intr->mask_misc = intr->mask_misc_orig;
1709         intr->mask_misc_orig = 0;
1710         intr->mask = intr->mask_orig;
1711         intr->mask_orig = 0;
1712
1713         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
1714         ngbe_enable_intr(dev);
1715 }
1716
1717 /**
1718  * Interrupt handler triggered by the NIC for handling
1719  * specific interrupts.
1720  *
1721  * @param param
1722  *  The address of parameter (struct rte_eth_dev *) registered before.
1723  */
1724 static void
1725 ngbe_dev_interrupt_handler(void *param)
1726 {
1727         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1728
1729         ngbe_dev_interrupt_get_status(dev);
1730         ngbe_dev_interrupt_action(dev);
1731 }
1732
1733 /**
1734  * Set the IVAR registers, mapping interrupt causes to vectors
1735  * @param hw
1736  *  pointer to ngbe_hw struct
1737  * @param direction
1738  *  0 for Rx, 1 for Tx, -1 for other causes
1739  * @param queue
1740  *  queue to map the corresponding interrupt to
1741  * @param msix_vector
1742  *  the vector to map to the corresponding queue
1743  */
1744 void
1745 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
1746                    uint8_t queue, uint8_t msix_vector)
1747 {
1748         uint32_t tmp, idx;
1749
1750         if (direction == -1) {
1751                 /* other causes */
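                     /*
                      * Mark the entry valid; all misc causes share the
                      * single IVARMISC register.
                      */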
1752                 msix_vector |= NGBE_IVARMISC_VLD;
1753                 idx = 0;
1754                 tmp = rd32(hw, NGBE_IVARMISC);
1755                 tmp &= ~(0xFF << idx);
1756                 tmp |= (msix_vector << idx);
1757                 wr32(hw, NGBE_IVARMISC, tmp);
1758         } else {
1759                 /* rx or tx causes */
1760                 /* Workaround for ICR lost */
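                     /*
                      * Each IVAR register holds four 8-bit entries covering
                      * two queues (Rx and Tx each), hence the bit offset
                      * 16 * (queue & 1) + 8 * direction in IVAR(queue / 2).
                      */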
1761                 idx = ((16 * (queue & 1)) + (8 * direction));
1762                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
1763                 tmp &= ~(0xFF << idx);
1764                 tmp |= (msix_vector << idx);
1765                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
1766         }
1767 }
1768
1769 /**
1770  * Sets up the hardware to properly generate MSI-X interrupts
1771  * @param dev
1772  *  Pointer to struct rte_eth_dev
1773  */
1774 static void
1775 ngbe_configure_msix(struct rte_eth_dev *dev)
1776 {
1777         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1778         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1779         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1780         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
1781         uint32_t vec = NGBE_MISC_VEC_ID;
1782         uint32_t gpie;
1783
1784         /*
1785          * Don't configure the MSI-X registers if no mapping between
1786          * interrupt vectors and event fds has been done, unless MSI-X
1787          * has already been enabled: then auto clean, auto mask and
1788          * throttling still need to be configured.
1789          */
1790         gpie = rd32(hw, NGBE_GPIE);
1791         if (!rte_intr_dp_is_en(intr_handle) &&
1792             !(gpie & NGBE_GPIE_MSIX))
1793                 return;
1794
1795         if (rte_intr_allow_others(intr_handle)) {
1796                 base = NGBE_RX_VEC_START;
1797                 vec = base;
1798         }
1799
1800         /* setup GPIE for MSI-X mode */
1801         gpie = rd32(hw, NGBE_GPIE);
1802         gpie |= NGBE_GPIE_MSIX;
1803         wr32(hw, NGBE_GPIE, gpie);
1804
1805         /* Populate the IVAR table and set the ITR values to the
1806          * corresponding register.
1807          */
1808         if (rte_intr_dp_is_en(intr_handle)) {
1809                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
1810                         queue_id++) {
1811                         /* by default, 1:1 mapping */
1812                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
1813                         rte_intr_vec_list_index_set(intr_handle,
1814                                                            queue_id, vec);
1815                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
1816                             - 1)
1817                                 vec++;
1818                 }
1819
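                     /*
                      * Map the misc causes to the misc vector; the queue
                      * argument is unused when direction is -1.
                      */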
1820                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
1821         }
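             /* program the default interrupt throttling interval for the misc vector */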
1822         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
1823                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
1824                         | NGBE_ITR_WRDSA);
1825 }
1826
1827 static const struct eth_dev_ops ngbe_eth_dev_ops = {
1828         .dev_configure              = ngbe_dev_configure,
1829         .dev_infos_get              = ngbe_dev_info_get,
1830         .dev_start                  = ngbe_dev_start,
1831         .dev_stop                   = ngbe_dev_stop,
1832         .dev_close                  = ngbe_dev_close,
1833         .dev_reset                  = ngbe_dev_reset,
1834         .link_update                = ngbe_dev_link_update,
1835         .stats_get                  = ngbe_dev_stats_get,
1836         .stats_reset                = ngbe_dev_stats_reset,
1837         .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
1838         .vlan_filter_set            = ngbe_vlan_filter_set,
1839         .vlan_tpid_set              = ngbe_vlan_tpid_set,
1840         .vlan_offload_set           = ngbe_vlan_offload_set,
1841         .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
1842         .rx_queue_start             = ngbe_dev_rx_queue_start,
1843         .rx_queue_stop              = ngbe_dev_rx_queue_stop,
1844         .tx_queue_start             = ngbe_dev_tx_queue_start,
1845         .tx_queue_stop              = ngbe_dev_tx_queue_stop,
1846         .rx_queue_setup             = ngbe_dev_rx_queue_setup,
1847         .rx_queue_release           = ngbe_dev_rx_queue_release,
1848         .tx_queue_setup             = ngbe_dev_tx_queue_setup,
1849         .tx_queue_release           = ngbe_dev_tx_queue_release,
1850         .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
1851         .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
1852 };
1853
1854 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
1855 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
1856 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
1857
1858 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
1859 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
1860
1861 #ifdef RTE_ETHDEV_DEBUG_RX
1862         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
1863 #endif
1864 #ifdef RTE_ETHDEV_DEBUG_TX
1865         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
1866 #endif