net/ngbe: support VLAN offload and VLAN filter
[dpdk.git] / drivers / net / ngbe / ngbe_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5
6 #include <errno.h>
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
9
10 #include <rte_alarm.h>
11
12 #include "ngbe_logs.h"
13 #include "ngbe.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
16
17 static int ngbe_dev_close(struct rte_eth_dev *dev);
18 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
19                                 int wait_to_complete);
20 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
21 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
22                                         uint16_t queue);
23
24 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
25 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
26 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
27 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
28 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
29 static void ngbe_dev_interrupt_handler(void *param);
30 static void ngbe_dev_interrupt_delayed_handler(void *param);
31 static void ngbe_configure_msix(struct rte_eth_dev *dev);
32
/*
 * Mark queue (q) as having HW VLAN stripping enabled in bitmap (h).
 * The shift constant is unsigned: a queue may map to bit 31 of a
 * 32-bit word, and left-shifting a signed 1 by 31 is undefined
 * behavior in C.
 */
#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1u << bit;\
	} while (0)
38
/*
 * Clear queue (q)'s HW VLAN stripping flag in bitmap (h).
 * Unsigned shift constant avoids UB when the queue maps to bit 31
 * (see NGBE_SET_HWSTRIP).
 */
#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1u << bit);\
	} while (0)
44
/* Read queue (q)'s HW VLAN stripping flag from bitmap (h) into (r). */
#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t word = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t shift = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = ((h)->bitmap[word] >> shift) & 1;\
	} while (0)
50
/*
 * The set of PCI devices this driver supports: the WangXun WX1860
 * (Emerald) 1GbE family.  The table must end with a zeroed entry.
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};
69
/* Rx descriptor ring limits reported to applications via dev_info */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

/* Tx descriptor ring limits, including per-packet segment caps */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};
83
84 static const struct eth_dev_ops ngbe_eth_dev_ops;
85
86 static inline int32_t
87 ngbe_pf_reset_hw(struct ngbe_hw *hw)
88 {
89         uint32_t ctrl_ext;
90         int32_t status;
91
92         status = hw->mac.reset_hw(hw);
93
94         ctrl_ext = rd32(hw, NGBE_PORTCTL);
95         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
96         ctrl_ext |= NGBE_PORTCTL_RSTDONE;
97         wr32(hw, NGBE_PORTCTL, ctrl_ext);
98         ngbe_flush(hw);
99
100         if (status == NGBE_ERR_SFP_NOT_PRESENT)
101                 status = 0;
102         return status;
103 }
104
105 static inline void
106 ngbe_enable_intr(struct rte_eth_dev *dev)
107 {
108         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
109         struct ngbe_hw *hw = ngbe_dev_hw(dev);
110
111         wr32(hw, NGBE_IENMISC, intr->mask_misc);
112         wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
113         ngbe_flush(hw);
114 }
115
/*
 * Mask every interrupt source: writing all ones to the Interrupt
 * Mask Set register (IMS) disables delivery for every vector.
 */
static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}
124
/*
 * Ensure that all locks are released before first NVM or PHY access.
 * Acquires and immediately releases the PHY/mailbox/flash semaphores;
 * if the acquire fails the semaphore was left held by a previous
 * owner, and the release below recovers it.
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	/* a failed acquire means a stale lock is being broken here */
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}
147
/*
 * Per-device init hook, invoked from eth_ngbe_pci_probe().
 * Primary process: full bring-up — EEPROM validation, MAC init,
 * MAC-address tables, interrupt wiring.  Secondary process: only
 * (re)selects the Rx/Tx burst functions set up by the primary.
 * Returns 0 on success, negative errno on failure.
 */
static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* Tx queue function in primary, set by last queue initialized
		 * Tx queue may not initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		/* NOTE(review): the ISB memzone reserved above is not
		 * released on this or the following error paths — confirm
		 * whether memzone reuse on re-probe makes this benign.
		 */
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* disable interrupt until everything is wired up below */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta (software VLAN filter table shadow) */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap*/
	memset(hwstrip, 0, sizeof(*hwstrip));

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}
300
301 static int
302 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
303 {
304         PMD_INIT_FUNC_TRACE();
305
306         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
307                 return 0;
308
309         ngbe_dev_close(eth_dev);
310
311         return 0;
312 }
313
314 static int
315 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
316                 struct rte_pci_device *pci_dev)
317 {
318         return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
319                         sizeof(struct ngbe_adapter),
320                         eth_dev_pci_specific_init, pci_dev,
321                         eth_ngbe_dev_init, NULL);
322 }
323
324 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
325 {
326         struct rte_eth_dev *ethdev;
327
328         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
329         if (ethdev == NULL)
330                 return 0;
331
332         return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
333 }
334
/*
 * PCI driver descriptor: requires BAR mapping and supports
 * link-status-change interrupts.
 */
static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};
342
343 static int
344 ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
345 {
346         struct ngbe_hw *hw = ngbe_dev_hw(dev);
347         struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
348         uint32_t vfta;
349         uint32_t vid_idx;
350         uint32_t vid_bit;
351
352         vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
353         vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
354         vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
355         if (on)
356                 vfta |= vid_bit;
357         else
358                 vfta &= ~vid_bit;
359         wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
360
361         /* update local VFTA copy */
362         shadow_vfta->vfta[vid_idx] = vfta;
363
364         return 0;
365 }
366
/*
 * Enable/disable HW VLAN stripping on a single Rx queue.  If the
 * strip bit actually changes while the ring is enabled, the queue is
 * stopped, its base-address registers and the new RXCFG value are
 * rewritten, and the queue is restarted — the strip bit takes effect
 * only across a ring restart.
 */
static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	/* update the per-queue bitmap and rxq->offloads first */
	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	/* snapshot ring registers so they can be restored after a stop */
	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		/* restart only if the ring is live and the bit flips */
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			!(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	/* queue start re-enables the ring; write RXCFG with ENA clear */
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}
404
/*
 * Program the TPID (Tag Protocol ID) used for inner/outer VLAN tag
 * matching.  Which registers receive the TPID depends on whether
 * extended-VLAN (VLANEXT) and QinQ are currently enabled in PORTCTL.
 * Returns 0 on success, -ENOTSUP when inner TPID is requested without
 * extended VLAN, -EINVAL on an unknown vlan_type.
 */
static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	/* qinq implies vlan_ext; both are read from the live PORTCTL */
	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			/* inner tag TPID lives in VLANCTL/DMATXCTRL
			 * when extended VLAN is on
			 */
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16-bits is valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}
467
468 void
469 ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
470 {
471         struct ngbe_hw *hw = ngbe_dev_hw(dev);
472         uint32_t vlnctrl;
473
474         PMD_INIT_FUNC_TRACE();
475
476         /* Filter Table Disable */
477         vlnctrl = rd32(hw, NGBE_VLANCTL);
478         vlnctrl &= ~NGBE_VLANCTL_VFE;
479         wr32(hw, NGBE_VLANCTL, vlnctrl);
480 }
481
482 void
483 ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
484 {
485         struct ngbe_hw *hw = ngbe_dev_hw(dev);
486         struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
487         uint32_t vlnctrl;
488         uint16_t i;
489
490         PMD_INIT_FUNC_TRACE();
491
492         /* Filter Table Enable */
493         vlnctrl = rd32(hw, NGBE_VLANCTL);
494         vlnctrl &= ~NGBE_VLANCTL_CFIENA;
495         vlnctrl |= NGBE_VLANCTL_VFE;
496         wr32(hw, NGBE_VLANCTL, vlnctrl);
497
498         /* write whatever is in local vfta copy */
499         for (i = 0; i < NGBE_VFTA_SIZE; i++)
500                 wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
501 }
502
/*
 * Record queue @queue's VLAN-strip state in the per-device bitmap and,
 * when the queue exists, update the rxq's vlan_flags/offloads so the
 * Rx path marks mbufs correctly.
 */
void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	/* bitmap only covers NGBE_MAX_RX_QUEUE_NUM queues */
	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	/* queue may be tracked in the bitmap but not configured yet */
	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		/* stripped tags are delivered in the mbuf vlan_tci field */
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}
530
531 static void
532 ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
533 {
534         struct ngbe_hw *hw = ngbe_dev_hw(dev);
535         uint32_t ctrl;
536
537         PMD_INIT_FUNC_TRACE();
538
539         ctrl = rd32(hw, NGBE_RXCFG(queue));
540         ctrl &= ~NGBE_RXCFG_VLAN;
541         wr32(hw, NGBE_RXCFG(queue), ctrl);
542
543         /* record those setting for HW strip per queue */
544         ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
545 }
546
547 static void
548 ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
549 {
550         struct ngbe_hw *hw = ngbe_dev_hw(dev);
551         uint32_t ctrl;
552
553         PMD_INIT_FUNC_TRACE();
554
555         ctrl = rd32(hw, NGBE_RXCFG(queue));
556         ctrl |= NGBE_RXCFG_VLAN;
557         wr32(hw, NGBE_RXCFG(queue), ctrl);
558
559         /* record those setting for HW strip per queue */
560         ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
561 }
562
563 static void
564 ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
565 {
566         struct ngbe_hw *hw = ngbe_dev_hw(dev);
567         uint32_t ctrl;
568
569         PMD_INIT_FUNC_TRACE();
570
571         ctrl = rd32(hw, NGBE_PORTCTL);
572         ctrl &= ~NGBE_PORTCTL_VLANEXT;
573         ctrl &= ~NGBE_PORTCTL_QINQ;
574         wr32(hw, NGBE_PORTCTL, ctrl);
575 }
576
577 static void
578 ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
579 {
580         struct ngbe_hw *hw = ngbe_dev_hw(dev);
581         uint32_t ctrl;
582
583         PMD_INIT_FUNC_TRACE();
584
585         ctrl  = rd32(hw, NGBE_PORTCTL);
586         ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
587         wr32(hw, NGBE_PORTCTL, ctrl);
588 }
589
590 static void
591 ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
592 {
593         struct ngbe_hw *hw = ngbe_dev_hw(dev);
594         uint32_t ctrl;
595
596         PMD_INIT_FUNC_TRACE();
597
598         ctrl = rd32(hw, NGBE_PORTCTL);
599         ctrl &= ~NGBE_PORTCTL_QINQ;
600         wr32(hw, NGBE_PORTCTL, ctrl);
601 }
602
603 static void
604 ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
605 {
606         struct ngbe_hw *hw = ngbe_dev_hw(dev);
607         uint32_t ctrl;
608
609         PMD_INIT_FUNC_TRACE();
610
611         ctrl  = rd32(hw, NGBE_PORTCTL);
612         ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
613         wr32(hw, NGBE_PORTCTL, ctrl);
614 }
615
616 void
617 ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
618 {
619         struct ngbe_rx_queue *rxq;
620         uint16_t i;
621
622         PMD_INIT_FUNC_TRACE();
623
624         for (i = 0; i < dev->data->nb_rx_queues; i++) {
625                 rxq = dev->data->rx_queues[i];
626
627                 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
628                         ngbe_vlan_hw_strip_enable(dev, i);
629                 else
630                         ngbe_vlan_hw_strip_disable(dev, i);
631         }
632 }
633
634 void
635 ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
636 {
637         uint16_t i;
638         struct rte_eth_rxmode *rxmode;
639         struct ngbe_rx_queue *rxq;
640
641         if (mask & RTE_ETH_VLAN_STRIP_MASK) {
642                 rxmode = &dev->data->dev_conf.rxmode;
643                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
644                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
645                                 rxq = dev->data->rx_queues[i];
646                                 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
647                         }
648                 else
649                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
650                                 rxq = dev->data->rx_queues[i];
651                                 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
652                         }
653         }
654 }
655
656 static int
657 ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
658 {
659         struct rte_eth_rxmode *rxmode;
660         rxmode = &dev->data->dev_conf.rxmode;
661
662         if (mask & RTE_ETH_VLAN_STRIP_MASK)
663                 ngbe_vlan_hw_strip_config(dev);
664
665         if (mask & RTE_ETH_VLAN_FILTER_MASK) {
666                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
667                         ngbe_vlan_hw_filter_enable(dev);
668                 else
669                         ngbe_vlan_hw_filter_disable(dev);
670         }
671
672         if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
673                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
674                         ngbe_vlan_hw_extend_enable(dev);
675                 else
676                         ngbe_vlan_hw_extend_disable(dev);
677         }
678
679         if (mask & RTE_ETH_QINQ_STRIP_MASK) {
680                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
681                         ngbe_qinq_hw_strip_enable(dev);
682                 else
683                         ngbe_qinq_hw_strip_disable(dev);
684         }
685
686         return 0;
687 }
688
/*
 * dev_ops vlan_offload_set: sync per-queue strip flags with the port
 * config, then program the hardware.  Returns 0.
 */
static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	/* keep rxq->offloads consistent before touching registers */
	ngbe_config_vlan_strip_on_all_queues(dev, mask);
	ngbe_vlan_offload_config(dev, mask);

	return 0;
}
698
/*
 * dev_ops dev_configure: record that a link-status refresh is needed
 * and reset the bulk-allocation hint for Rx queue setup.  Returns 0.
 */
static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
	 * allocation Rx preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}
718
/*
 * Configure the GPIO lines used for PHY link interrupts and add GPIO
 * to the misc interrupt mask.  The yt8521s SFI PHY uses the opposite
 * interrupt polarity from the other supported PHYs.
 */
static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	/* NOTE(review): the DDR/INT/LEVEL/ACT magic values below come
	 * from the WX1860 datasheet — confirm against hardware docs.
	 */
	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}
735
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	ngbe_stop_hw(hw);

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* wire up GPIO-based PHY link interrupts */
	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	/* re-apply VLAN strip/filter/extend offloads after the HW reset */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	/* build the set of speeds this MAC can advertise */
	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		/* NOTE(review): err may still hold 0 here, so the error
		 * log below can print "failure in dev start: 0" — confirm
		 * whether an explicit err = -EINVAL is intended.
		 */
		goto error;
	}

	/* translate requested RTE speeds into HW speed bits */
	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	hw->phy.init_hw(hw);
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used to power on/off control*/
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}
913
/*
 * Stop device: disable Rx and Tx functions to allow for reconfiguring.
 * Idempotent: returns immediately if the adapter is already stopped.
 * Always returns 0.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	/* nothing to do if already stopped */
	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used to power on/off control*/
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	/* clear the flag temporarily so ngbe_stop_hw() below runs its
	 * full sequence; it is set back to true at the end
	 */
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}
970
/*
 * Reset and stop device: release queues, restore RAR[0], release hardware
 * semaphores and unregister the interrupt callback.
 * Returns the result of the interrupt-callback unregistration.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* -EAGAIN means the callback is still executing (e.g. the delayed
	 * link handler), so keep retrying with a delay rather than failing
	 * straight away
	 */
	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}
1021
1022 /*
1023  * Reset PF device.
1024  */
1025 static int
1026 ngbe_dev_reset(struct rte_eth_dev *dev)
1027 {
1028         int ret;
1029
1030         ret = eth_ngbe_dev_uninit(dev);
1031         if (ret != 0)
1032                 return ret;
1033
1034         ret = eth_ngbe_dev_init(dev, NULL);
1035
1036         return ret;
1037 }
1038
1039 static int
1040 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1041 {
1042         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1043
1044         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1045         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1046         dev_info->min_rx_bufsize = 1024;
1047         dev_info->max_rx_pktlen = 15872;
1048         dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1049         dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1050                                      dev_info->rx_queue_offload_capa);
1051         dev_info->tx_queue_offload_capa = 0;
1052         dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1053
1054         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1055                 .rx_thresh = {
1056                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1057                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1058                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1059                 },
1060                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1061                 .rx_drop_en = 0,
1062                 .offloads = 0,
1063         };
1064
1065         dev_info->default_txconf = (struct rte_eth_txconf) {
1066                 .tx_thresh = {
1067                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1068                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1069                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1070                 },
1071                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1072                 .offloads = 0,
1073         };
1074
1075         dev_info->rx_desc_lim = rx_desc_lim;
1076         dev_info->tx_desc_lim = tx_desc_lim;
1077
1078         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1079                                 RTE_ETH_LINK_SPEED_10M;
1080
1081         /* Driver-preferred Rx/Tx parameters */
1082         dev_info->default_rxportconf.burst_size = 32;
1083         dev_info->default_txportconf.burst_size = 32;
1084         dev_info->default_rxportconf.nb_queues = 1;
1085         dev_info->default_txportconf.nb_queues = 1;
1086         dev_info->default_rxportconf.ring_size = 256;
1087         dev_info->default_txportconf.ring_size = 256;
1088
1089         return 0;
1090 }
1091
1092 const uint32_t *
1093 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1094 {
1095         if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1096             dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1097             dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1098             dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1099                 return ngbe_get_supported_ptypes();
1100
1101         return NULL;
1102 }
1103
/*
 * Query the MAC for the current link state and publish it to ethdev.
 * On the PF it also programs the LAN speed field and re-enables MAC Tx
 * once a valid speed is negotiated.
 *
 * return 0 means link status changed, -1 means not changed
 */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	bool link_up;
	int err;
	int wait = 1;

	/* start from a "link down" default report */
	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			~RTE_ETH_LINK_SPEED_AUTONEG);

	hw->mac.get_link_status = true;

	/* link configuration still pending: report down */
	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		/* query failed: publish link down */
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (!link_up)
		return rte_eth_linkstatus_set(dev, &link);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	/* map the HW speed code to the ethdev speed and to the
	 * LAN-speed register encoding (0=10M, 1=100M, 2=1G)
	 */
	switch (link_speed) {
	default:
	case NGBE_LINK_SPEED_UNKNOWN:
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		break;

	case NGBE_LINK_SPEED_10M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
		lan_speed = 0;
		break;

	case NGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		lan_speed = 1;
		break;

	case NGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		lan_speed = 2;
		break;
	}

	if (hw->is_pf) {
		/* program the negotiated speed and enable MAC Tx */
		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
				NGBE_LINK_SPEED_100M_FULL |
				NGBE_LINK_SPEED_10M_FULL)) {
			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
		}
	}

	return rte_eth_linkstatus_set(dev, &link);
}
1182
/* Thin ethdev callback: delegate to the shared link-update helper. */
static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	int err = ngbe_dev_link_update_share(dev, wait_to_complete);

	return err;
}
1188
1189 /**
1190  * It clears the interrupt causes and enables the interrupt.
1191  * It will be called once only during NIC initialized.
1192  *
1193  * @param dev
1194  *  Pointer to struct rte_eth_dev.
1195  * @param on
1196  *  Enable or Disable.
1197  *
1198  * @return
1199  *  - On success, zero.
1200  *  - On failure, a negative value.
1201  */
1202 static int
1203 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1204 {
1205         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1206
1207         ngbe_dev_link_status_print(dev);
1208         if (on != 0) {
1209                 intr->mask_misc |= NGBE_ICRMISC_PHY;
1210                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
1211         } else {
1212                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1213                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1214         }
1215
1216         return 0;
1217 }
1218
1219 /**
1220  * It clears the interrupt causes and enables the interrupt.
1221  * It will be called once only during NIC initialized.
1222  *
1223  * @param dev
1224  *  Pointer to struct rte_eth_dev.
1225  *
1226  * @return
1227  *  - On success, zero.
1228  *  - On failure, a negative value.
1229  */
1230 static int
1231 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1232 {
1233         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1234         u64 mask;
1235
1236         mask = NGBE_ICR_MASK;
1237         mask &= (1ULL << NGBE_MISC_VEC_ID);
1238         intr->mask |= mask;
1239         intr->mask_misc |= NGBE_ICRMISC_GPIO;
1240
1241         return 0;
1242 }
1243
1244 /**
1245  * It clears the interrupt causes and enables the interrupt.
1246  * It will be called once only during NIC initialized.
1247  *
1248  * @param dev
1249  *  Pointer to struct rte_eth_dev.
1250  *
1251  * @return
1252  *  - On success, zero.
1253  *  - On failure, a negative value.
1254  */
1255 static int
1256 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1257 {
1258         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1259         u64 mask;
1260
1261         mask = NGBE_ICR_MASK;
1262         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
1263         intr->mask |= mask;
1264
1265         return 0;
1266 }
1267
1268 /**
1269  * It clears the interrupt causes and enables the interrupt.
1270  * It will be called once only during NIC initialized.
1271  *
1272  * @param dev
1273  *  Pointer to struct rte_eth_dev.
1274  *
1275  * @return
1276  *  - On success, zero.
1277  *  - On failure, a negative value.
1278  */
1279 static int
1280 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1281 {
1282         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1283
1284         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
1285
1286         return 0;
1287 }
1288
/*
 * It reads ICR and sets flag for the link_update.
 * Interrupts are masked first; the causes are read from the in-status
 * block (ISB) in host memory and translated into intr->flags bits for
 * ngbe_dev_interrupt_action() to consume.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - Always zero.
 */
static int
ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	/* clear all cause mask */
	ngbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	/* previous flags are discarded, not accumulated */
	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & NGBE_ICRMISC_PHY)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/* VF mailbox request */
	if (eicr & NGBE_ICRMISC_VFMBX)
		intr->flags |= NGBE_FLAG_MAILBOX;

	/* link security event */
	if (eicr & NGBE_ICRMISC_LNKSEC)
		intr->flags |= NGBE_FLAG_MACSEC;

	/* GPIO events also trigger a link update */
	if (eicr & NGBE_ICRMISC_GPIO)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	return 0;
}
1330
1331 /**
1332  * It gets and then prints the link status.
1333  *
1334  * @param dev
1335  *  Pointer to struct rte_eth_dev.
1336  *
1337  * @return
1338  *  - On success, zero.
1339  *  - On failure, a negative value.
1340  */
1341 static void
1342 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
1343 {
1344         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1345         struct rte_eth_link link;
1346
1347         rte_eth_linkstatus_get(dev, &link);
1348
1349         if (link.link_status == RTE_ETH_LINK_UP) {
1350                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1351                                         (int)(dev->data->port_id),
1352                                         (unsigned int)link.link_speed,
1353                         link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1354                                         "full-duplex" : "half-duplex");
1355         } else {
1356                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
1357                                 (int)(dev->data->port_id));
1358         }
1359         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1360                                 pci_dev->addr.domain,
1361                                 pci_dev->addr.bus,
1362                                 pci_dev->addr.devid,
1363                                 pci_dev->addr.function);
1364 }
1365
/*
 * It executes link_update after knowing an interrupt occurred.
 * For link events it schedules ngbe_dev_interrupt_delayed_handler() via an
 * EAL alarm (1s if the link looks to be coming up, 4s if going down) and
 * temporarily masks the LSC/misc causes until that handler restores them.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - Always zero.
 */
static int
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	int64_t timeout;

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/*get the link status before link update, for predicting later*/
		rte_eth_linkstatus_get(dev, &link);

		ngbe_dev_link_update(dev, 0);

		/* likely to up */
		if (link.link_status != RTE_ETH_LINK_UP)
			/* handle it 1 sec later, wait it being stable */
			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait it being stable */
			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;

		ngbe_dev_link_status_print(dev);
		/* timeout is in ms; rte_eal_alarm_set() takes us */
		if (rte_eal_alarm_set(timeout * 1000,
				      ngbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~NGBE_ICRMISC_PHY;

			intr->mask_orig = intr->mask;
			/* only disable all misc interrupts */
			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ngbe_enable_intr(dev);

	return 0;
}
1423
/**
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable nic state. As the
 * NIC interrupt state is not stable for ngbe after link is just down,
 * it needs to wait 4 seconds to get the stable status.
 *
 * Re-runs the link update, notifies applications of LSC/MACSEC events,
 * then restores the interrupt masks that ngbe_dev_interrupt_action()
 * saved before scheduling this alarm.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t eicr;

	ngbe_disable_intr(hw);

	/* drain causes accumulated while this handler was pending;
	 * the value is only reported in the debug log below
	 */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		ngbe_dev_link_update(dev, 0);
		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
		ngbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}

	if (intr->flags & NGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					      NULL);
		intr->flags &= ~NGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;
	intr->mask = intr->mask_orig;
	intr->mask_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ngbe_enable_intr(dev);
}
1468
/**
 * Interrupt handler triggered by NIC for handling specific interrupt:
 * read and latch the causes, then act on them.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;

	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);
}
1484
1485 /**
1486  * Set the IVAR registers, mapping interrupt causes to vectors
1487  * @param hw
1488  *  pointer to ngbe_hw struct
1489  * @direction
1490  *  0 for Rx, 1 for Tx, -1 for other causes
1491  * @queue
1492  *  queue to map the corresponding interrupt to
1493  * @msix_vector
1494  *  the vector to map to the corresponding queue
1495  */
1496 void
1497 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
1498                    uint8_t queue, uint8_t msix_vector)
1499 {
1500         uint32_t tmp, idx;
1501
1502         if (direction == -1) {
1503                 /* other causes */
1504                 msix_vector |= NGBE_IVARMISC_VLD;
1505                 idx = 0;
1506                 tmp = rd32(hw, NGBE_IVARMISC);
1507                 tmp &= ~(0xFF << idx);
1508                 tmp |= (msix_vector << idx);
1509                 wr32(hw, NGBE_IVARMISC, tmp);
1510         } else {
1511                 /* rx or tx causes */
1512                 /* Workround for ICR lost */
1513                 idx = ((16 * (queue & 1)) + (8 * direction));
1514                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
1515                 tmp &= ~(0xFF << idx);
1516                 tmp |= (msix_vector << idx);
1517                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
1518         }
1519 }
1520
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @hw
 *  board private structure
 *
 * Programs GPIE for MSI-X mode, populates the IVAR table with a 1:1
 * queue-to-vector mapping, and sets the ITR interval for the misc vector.
 */
static void
ngbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
	uint32_t vec = NGBE_MISC_VEC_ID;
	uint32_t gpie;

	/*
	 * Won't configure MSI-X register if no mapping is done
	 * between intr vector and event fd
	 * but if MSI-X has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, NGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & NGBE_GPIE_MSIX))
		return;

	/* queue vectors start after the misc vector when multiplexing
	 * with other interrupts is allowed
	 */
	if (rte_intr_allow_others(intr_handle)) {
		base = NGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	wr32(hw, NGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			ngbe_set_ivar_map(hw, 0, queue_id, vec);
			rte_intr_vec_list_index_set(intr_handle,
							   queue_id, vec);
			/* remaining queues share the last event fd */
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
			    - 1)
				vec++;
		}

		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	}
	/* throttle the misc vector at the default interval */
	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| NGBE_ITR_WRDSA);
}
1578
/* ethdev callback table for the ngbe PF */
static const struct eth_dev_ops ngbe_eth_dev_ops = {
	/* device lifecycle */
	.dev_configure              = ngbe_dev_configure,
	.dev_infos_get              = ngbe_dev_info_get,
	.dev_start                  = ngbe_dev_start,
	.dev_stop                   = ngbe_dev_stop,
	.dev_close                  = ngbe_dev_close,
	.dev_reset                  = ngbe_dev_reset,
	/* link and packet-type reporting */
	.link_update                = ngbe_dev_link_update,
	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
	/* VLAN filtering and offloads */
	.vlan_filter_set            = ngbe_vlan_filter_set,
	.vlan_tpid_set              = ngbe_vlan_tpid_set,
	.vlan_offload_set           = ngbe_vlan_offload_set,
	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
	/* Rx/Tx queue management */
	.rx_queue_start             = ngbe_dev_rx_queue_start,
	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
	.tx_queue_start             = ngbe_dev_tx_queue_start,
	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
	.rx_queue_release           = ngbe_dev_rx_queue_release,
	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
	.tx_queue_release           = ngbe_dev_tx_queue_release,
	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
};
1603
/* Register the PMD with the PCI bus, its device ID table, and the kernel
 * modules it can bind against
 */
RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");

/* Driver log types; default level NOTICE, Rx/Tx data-path logs only when
 * the debug build options are enabled
 */
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
#endif