1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5
6 #include <errno.h>
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
9
10 #include <rte_alarm.h>
11
12 #include "ngbe_logs.h"
13 #include "ngbe.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
16
17 static int ngbe_dev_close(struct rte_eth_dev *dev);
18 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
19                                 int wait_to_complete);
20
21 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
22 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
23 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
24 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
25 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
26 static void ngbe_dev_interrupt_handler(void *param);
27 static void ngbe_dev_interrupt_delayed_handler(void *param);
28 static void ngbe_configure_msix(struct rte_eth_dev *dev);
29
30 /*
31  * The set of PCI devices this driver supports
32  */
33 static const struct rte_pci_id pci_id_ngbe_map[] = {
34         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
35         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
36         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
37         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
38         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
39         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
40         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
41         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
42         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
43         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
44         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
45         { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
46         { .vendor_id = 0, /* sentinel */ },
47 };
48
49 static const struct rte_eth_desc_lim rx_desc_lim = {
50         .nb_max = NGBE_RING_DESC_MAX,
51         .nb_min = NGBE_RING_DESC_MIN,
52         .nb_align = NGBE_RXD_ALIGN,
53 };
54
55 static const struct rte_eth_desc_lim tx_desc_lim = {
56         .nb_max = NGBE_RING_DESC_MAX,
57         .nb_min = NGBE_RING_DESC_MIN,
58         .nb_align = NGBE_TXD_ALIGN,
59         .nb_seg_max = NGBE_TX_MAX_SEG,
60         .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
61 };
62
63 static const struct eth_dev_ops ngbe_eth_dev_ops;
64
65 static inline int32_t
66 ngbe_pf_reset_hw(struct ngbe_hw *hw)
67 {
68         uint32_t ctrl_ext;
69         int32_t status;
70
71         status = hw->mac.reset_hw(hw);
72
73         ctrl_ext = rd32(hw, NGBE_PORTCTL);
74         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
75         ctrl_ext |= NGBE_PORTCTL_RSTDONE;
76         wr32(hw, NGBE_PORTCTL, ctrl_ext);
77         ngbe_flush(hw);
78
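        /*
         * A missing SFP is not treated as fatal here; these 1G EM parts are
         * presumably usable with copper PHYs and no SFP cage fitted.
         */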
79         if (status == NGBE_ERR_SFP_NOT_PRESENT)
80                 status = 0;
81         return status;
82 }
83
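/*
 * Re-arm interrupts: program the tracked misc causes into IENMISC and clear
 * the queue-vector mask bits via IMC(0), which appears to be the interrupt
 * mask-clear register, so previously enabled sources can fire again.
 */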
84 static inline void
85 ngbe_enable_intr(struct rte_eth_dev *dev)
86 {
87         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
88         struct ngbe_hw *hw = ngbe_dev_hw(dev);
89
90         wr32(hw, NGBE_IENMISC, intr->mask_misc);
91         wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
92         ngbe_flush(hw);
93 }
94
95 static void
96 ngbe_disable_intr(struct ngbe_hw *hw)
97 {
98         PMD_INIT_FUNC_TRACE();
99
100         wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
101         ngbe_flush(hw);
102 }
103
104 /*
105  * Ensure that all locks are released before first NVM or PHY access
106  */
107 static void
108 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
109 {
110         uint16_t mask;
111
112         /*
113          * These locks are shared by all ports, which makes them trickier; but
114          * swfw_sync retries for long enough (1s) that, if the lock cannot be
115          * taken, it is almost certainly because the semaphore was left locked
116          * improperly.
117          */
118         mask = NGBE_MNGSEM_SWPHY |
119                NGBE_MNGSEM_SWMBX |
120                NGBE_MNGSEM_SWFLASH;
121         if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
122                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
123
124         hw->mac.release_swfw_sync(hw, mask);
125 }
126
127 static int
128 eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
129 {
130         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
131         struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
132         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
133         const struct rte_memzone *mz;
134         uint32_t ctrl_ext;
135         int err;
136
137         PMD_INIT_FUNC_TRACE();
138
139         eth_dev->dev_ops = &ngbe_eth_dev_ops;
140         eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
141
142         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
143                 return 0;
144
145         rte_eth_copy_pci_info(eth_dev, pci_dev);
146
147         /* Vendor and Device ID need to be set before init of shared code */
148         hw->device_id = pci_dev->id.device_id;
149         hw->vendor_id = pci_dev->id.vendor_id;
150         hw->sub_system_id = pci_dev->id.subsystem_device_id;
151         ngbe_map_device_id(hw);
152         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
153
154         /* Reserve memory for interrupt status block */
155         mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
156                 NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
157         if (mz == NULL)
158                 return -ENOMEM;
159
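        /*
         * The interrupt status block is written by the device via DMA; keep
         * both the bus address (for programming the NIC) and the virtual
         * address (for the driver to read interrupt causes from).
         */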
160         hw->isb_dma = TMZ_PADDR(mz);
161         hw->isb_mem = TMZ_VADDR(mz);
162
163         /* Initialize the shared code (base driver) */
164         err = ngbe_init_shared_code(hw);
165         if (err != 0) {
166                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
167                 return -EIO;
168         }
169
170         /* Unlock any pending hardware semaphore */
171         ngbe_swfw_lock_reset(hw);
172
173         err = hw->rom.init_params(hw);
174         if (err != 0) {
175                 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
176                 return -EIO;
177         }
178
179         /* Make sure we have a good EEPROM before we read from it */
180         err = hw->rom.validate_checksum(hw, NULL);
181         if (err != 0) {
182                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
183                 return -EIO;
184         }
185
186         err = hw->mac.init_hw(hw);
187         if (err != 0) {
188                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
189                 return -EIO;
190         }
191
192         /* disable interrupt */
193         ngbe_disable_intr(hw);
194
195         /* Allocate memory for storing MAC addresses */
196         eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
197                                                hw->mac.num_rar_entries, 0);
198         if (eth_dev->data->mac_addrs == NULL) {
199                 PMD_INIT_LOG(ERR,
200                              "Failed to allocate %u bytes needed to store MAC addresses",
201                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
202                 return -ENOMEM;
203         }
204
205         /* Copy the permanent MAC address */
206         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
207                         &eth_dev->data->mac_addrs[0]);
208
209         /* Allocate memory for storing hash filter MAC addresses */
210         eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
211                         RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
212         if (eth_dev->data->hash_mac_addrs == NULL) {
213                 PMD_INIT_LOG(ERR,
214                              "Failed to allocate %d bytes needed to store MAC addresses",
215                              RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
216                 rte_free(eth_dev->data->mac_addrs);
217                 eth_dev->data->mac_addrs = NULL;
218                 return -ENOMEM;
219         }
220
221         ctrl_ext = rd32(hw, NGBE_PORTCTL);
222         /* let hardware know driver is loaded */
223         ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
224         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
225         ctrl_ext |= NGBE_PORTCTL_RSTDONE;
226         wr32(hw, NGBE_PORTCTL, ctrl_ext);
227         ngbe_flush(hw);
228
229         PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
230                         (int)hw->mac.type, (int)hw->phy.type);
231
232         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
233                      eth_dev->data->port_id, pci_dev->id.vendor_id,
234                      pci_dev->id.device_id);
235
236         rte_intr_callback_register(intr_handle,
237                                    ngbe_dev_interrupt_handler, eth_dev);
238
239         /* enable uio/vfio intr/eventfd mapping */
240         rte_intr_enable(intr_handle);
241
242         /* enable support intr */
243         ngbe_enable_intr(eth_dev);
244
245         return 0;
246 }
247
248 static int
249 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
250 {
251         PMD_INIT_FUNC_TRACE();
252
253         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
254                 return 0;
255
256         ngbe_dev_close(eth_dev);
257
258         return -EINVAL;
259 }
260
261 static int
262 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
263                 struct rte_pci_device *pci_dev)
264 {
265         return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
266                         sizeof(struct ngbe_adapter),
267                         eth_dev_pci_specific_init, pci_dev,
268                         eth_ngbe_dev_init, NULL);
269 }
270
271 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
272 {
273         struct rte_eth_dev *ethdev;
274
275         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
276         if (ethdev == NULL)
277                 return 0;
278
279         return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
280 }
281
282 static struct rte_pci_driver rte_ngbe_pmd = {
283         .id_table = pci_id_ngbe_map,
284         .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
285                      RTE_PCI_DRV_INTR_LSC,
286         .probe = eth_ngbe_pci_probe,
287         .remove = eth_ngbe_pci_remove,
288 };
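/*
 * Illustrative usage sketch (not part of the driver): with a supported
 * WangXun EM NIC bound to vfio-pci, rte_eal_init() probes the PCI bus and
 * invokes eth_ngbe_pci_probe() above; the port is then driven through the
 * generic ethdev API, e.g.:
 *
 *   rte_eal_init(argc, argv);
 *   RTE_ETH_FOREACH_DEV(port_id) {
 *           rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *           rte_eth_rx_queue_setup(port_id, 0, 256, SOCKET_ID_ANY, NULL, mb_pool);
 *           rte_eth_tx_queue_setup(port_id, 0, 256, SOCKET_ID_ANY, NULL);
 *           rte_eth_dev_start(port_id);
 *   }
 *
 * where port_conf and mb_pool are application-provided.
 */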
289
290 static int
291 ngbe_dev_configure(struct rte_eth_dev *dev)
292 {
293         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
294         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
295
296         PMD_INIT_FUNC_TRACE();
297
298         /* set flag to update link status after init */
299         intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
300
301         /*
302          * Initialize to TRUE. If any Rx queue does not meet the bulk
303          * allocation preconditions, this will be reset to FALSE.
304          */
305         adapter->rx_bulk_alloc_allowed = true;
306
307         return 0;
308 }
309
310 static void
311 ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
312 {
313         struct ngbe_hw *hw = ngbe_dev_hw(dev);
314         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
315
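        /*
         * Route the PHY/link interrupt through GPIO: set the pin direction,
         * enable the GPIO interrupt, and use level triggering. The yt8521s
         * SFI PHY apparently signals with the opposite polarity, hence the
         * different NGBE_GPIOINTPOL setting below.
         */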
316         wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
317         wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
318         wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
319         if (hw->phy.type == ngbe_phy_yt8521s_sfi)
320                 wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
321         else
322                 wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
323
324         intr->mask_misc |= NGBE_ICRMISC_GPIO;
325 }
326
327 /*
328  * Configure the device link speed and set up the link.
329  * It returns 0 on success.
330  */
331 static int
332 ngbe_dev_start(struct rte_eth_dev *dev)
333 {
334         struct ngbe_hw *hw = ngbe_dev_hw(dev);
335         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
336         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
337         uint32_t intr_vector = 0;
338         int err;
339         bool link_up = false, negotiate = false;
340         uint32_t speed = 0;
341         uint32_t allowed_speeds = 0;
342         int status;
343         uint32_t *link_speeds;
344
345         PMD_INIT_FUNC_TRACE();
346
347         /* disable uio/vfio intr/eventfd mapping */
348         rte_intr_disable(intr_handle);
349
350         /* stop adapter */
351         hw->adapter_stopped = 0;
352         ngbe_stop_hw(hw);
353
354         /* reinitialize the adapter; this performs a reset and then a start */
355         hw->nb_rx_queues = dev->data->nb_rx_queues;
356         hw->nb_tx_queues = dev->data->nb_tx_queues;
357         status = ngbe_pf_reset_hw(hw);
358         if (status != 0)
359                 return -1;
360         hw->mac.start_hw(hw);
361         hw->mac.get_link_status = true;
362
363         ngbe_dev_phy_intr_setup(dev);
364
365         /* check and configure queue intr-vector mapping */
366         if ((rte_intr_cap_multiple(intr_handle) ||
367              !RTE_ETH_DEV_SRIOV(dev).active) &&
368             dev->data->dev_conf.intr_conf.rxq != 0) {
369                 intr_vector = dev->data->nb_rx_queues;
370                 if (rte_intr_efd_enable(intr_handle, intr_vector))
371                         return -1;
372         }
373
374         if (rte_intr_dp_is_en(intr_handle) && intr_handle->intr_vec == NULL) {
375                 intr_handle->intr_vec =
376                         rte_zmalloc("intr_vec",
377                                     dev->data->nb_rx_queues * sizeof(int), 0);
378                 if (intr_handle->intr_vec == NULL) {
379                         PMD_INIT_LOG(ERR,
380                                      "Failed to allocate %d rx_queues intr_vec",
381                                      dev->data->nb_rx_queues);
382                         return -ENOMEM;
383                 }
384         }
385
386         /* configure MSI-X for sleep until Rx interrupt */
387         ngbe_configure_msix(dev);
388
389         /* initialize transmission unit */
390         ngbe_dev_tx_init(dev);
391
392         /* This can fail when allocating mbufs for descriptor rings */
393         err = ngbe_dev_rx_init(dev);
394         if (err != 0) {
395                 PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
396                 goto error;
397         }
398
399         err = ngbe_dev_rxtx_start(dev);
400         if (err < 0) {
401                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
402                 goto error;
403         }
404
405         err = hw->mac.check_link(hw, &speed, &link_up, 0);
406         if (err != 0)
407                 goto error;
408         dev->data->dev_link.link_status = link_up;
409
410         link_speeds = &dev->data->dev_conf.link_speeds;
411         if (*link_speeds == ETH_LINK_SPEED_AUTONEG)
412                 negotiate = true;
413
414         err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
415         if (err != 0)
416                 goto error;
417
418         allowed_speeds = 0;
419         if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
420                 allowed_speeds |= ETH_LINK_SPEED_1G;
421         if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
422                 allowed_speeds |= ETH_LINK_SPEED_100M;
423         if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
424                 allowed_speeds |= ETH_LINK_SPEED_10M;
425
426         if (*link_speeds & ~allowed_speeds) {
427                 PMD_INIT_LOG(ERR, "Invalid link setting");
428                 goto error;
429         }
430
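        /*
         * Translate the requested ETH_LINK_SPEED_* bits into the hardware's
         * NGBE_LINK_SPEED_* encoding; autonegotiation simply uses the MAC's
         * default speed set.
         */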
431         speed = 0x0;
432         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
433                 speed = hw->mac.default_speeds;
434         } else {
435                 if (*link_speeds & ETH_LINK_SPEED_1G)
436                         speed |= NGBE_LINK_SPEED_1GB_FULL;
437                 if (*link_speeds & ETH_LINK_SPEED_100M)
438                         speed |= NGBE_LINK_SPEED_100M_FULL;
439                 if (*link_speeds & ETH_LINK_SPEED_10M)
440                         speed |= NGBE_LINK_SPEED_10M_FULL;
441         }
442
443         hw->phy.init_hw(hw);
444         err = hw->mac.setup_link(hw, speed, link_up);
445         if (err != 0)
446                 goto error;
447
448         if (rte_intr_allow_others(intr_handle)) {
449                 ngbe_dev_misc_interrupt_setup(dev);
450                 /* check if lsc interrupt is enabled */
451                 if (dev->data->dev_conf.intr_conf.lsc != 0)
452                         ngbe_dev_lsc_interrupt_setup(dev, TRUE);
453                 else
454                         ngbe_dev_lsc_interrupt_setup(dev, FALSE);
455                 ngbe_dev_macsec_interrupt_setup(dev);
456                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
457         } else {
458                 rte_intr_callback_unregister(intr_handle,
459                                              ngbe_dev_interrupt_handler, dev);
460                 if (dev->data->dev_conf.intr_conf.lsc != 0)
461                         PMD_INIT_LOG(INFO,
462                                      "LSC won't be enabled: no interrupt multiplexing available");
463         }
464
465         /* check if rxq interrupt is enabled */
466         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
467             rte_intr_dp_is_en(intr_handle))
468                 ngbe_dev_rxq_interrupt_setup(dev);
469
470         /* enable UIO/VFIO intr/eventfd mapping */
471         rte_intr_enable(intr_handle);
472
473         /* resume enabled intr since HW reset */
474         ngbe_enable_intr(dev);
475
476         if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
477                 (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
478                 /* GPIO0 is used for power on/off control */
479                 wr32(hw, NGBE_GPIODATA, 0);
480         }
481
482         /*
483          * Update link status right before returning, because the link
484          * configuration process may continue in a separate thread.
485          */
486         ngbe_dev_link_update(dev, 0);
487
488         return 0;
489
490 error:
491         PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
492         ngbe_dev_clear_queues(dev);
493         return -EIO;
494 }
495
496 /*
497  * Stop device: disable rx and tx functions to allow for reconfiguring.
498  */
499 static int
500 ngbe_dev_stop(struct rte_eth_dev *dev)
501 {
502         struct rte_eth_link link;
503         struct ngbe_hw *hw = ngbe_dev_hw(dev);
504         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
505         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
506
507         if (hw->adapter_stopped)
508                 return 0;
509
510         PMD_INIT_FUNC_TRACE();
511
512         if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
513                 (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
514                 /* GPIO0 is used for power on/off control */
515                 wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
516         }
517
518         /* disable interrupts */
519         ngbe_disable_intr(hw);
520
521         /* reset the NIC */
522         ngbe_pf_reset_hw(hw);
523         hw->adapter_stopped = 0;
524
525         /* stop adapter */
526         ngbe_stop_hw(hw);
527
528         ngbe_dev_clear_queues(dev);
529
530         /* Clear recorded link status */
531         memset(&link, 0, sizeof(link));
532         rte_eth_linkstatus_set(dev, &link);
533
534         if (!rte_intr_allow_others(intr_handle))
535                 /* restore the default interrupt handler */
536                 rte_intr_callback_register(intr_handle,
537                                            ngbe_dev_interrupt_handler,
538                                            (void *)dev);
539
540         /* Clean datapath event and queue/vec mapping */
541         rte_intr_efd_disable(intr_handle);
542         if (intr_handle->intr_vec != NULL) {
543                 rte_free(intr_handle->intr_vec);
544                 intr_handle->intr_vec = NULL;
545         }
546
547         hw->adapter_stopped = true;
548         dev->data->dev_started = 0;
549
550         return 0;
551 }
552
553 /*
554  * Reset and stop device.
555  */
556 static int
557 ngbe_dev_close(struct rte_eth_dev *dev)
558 {
559         PMD_INIT_FUNC_TRACE();
560
561         RTE_SET_USED(dev);
562
563         return -EINVAL;
564 }
565
566 static int
567 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
568 {
569         struct ngbe_hw *hw = ngbe_dev_hw(dev);
570
571         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
572         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
573         dev_info->min_rx_bufsize = 1024;
574         dev_info->max_rx_pktlen = 15872;
575
576         dev_info->default_rxconf = (struct rte_eth_rxconf) {
577                 .rx_thresh = {
578                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
579                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
580                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
581                 },
582                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
583                 .rx_drop_en = 0,
584                 .offloads = 0,
585         };
586
587         dev_info->default_txconf = (struct rte_eth_txconf) {
588                 .tx_thresh = {
589                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
590                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
591                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
592                 },
593                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
594                 .offloads = 0,
595         };
596
597         dev_info->rx_desc_lim = rx_desc_lim;
598         dev_info->tx_desc_lim = tx_desc_lim;
599
600         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
601                                 ETH_LINK_SPEED_10M;
602
603         /* Driver-preferred Rx/Tx parameters */
604         dev_info->default_rxportconf.burst_size = 32;
605         dev_info->default_txportconf.burst_size = 32;
606         dev_info->default_rxportconf.nb_queues = 1;
607         dev_info->default_txportconf.nb_queues = 1;
608         dev_info->default_rxportconf.ring_size = 256;
609         dev_info->default_txportconf.ring_size = 256;
610
611         return 0;
612 }
613
614 /* Return 0 if the link status changed, -1 if it did not change */
615 int
616 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
617                             int wait_to_complete)
618 {
619         struct ngbe_hw *hw = ngbe_dev_hw(dev);
620         struct rte_eth_link link;
621         u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
622         u32 lan_speed = 0;
623         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
624         bool link_up;
625         int err;
626         int wait = 1;
627
628         memset(&link, 0, sizeof(link));
629         link.link_status = ETH_LINK_DOWN;
630         link.link_speed = ETH_SPEED_NUM_NONE;
631         link.link_duplex = ETH_LINK_HALF_DUPLEX;
632         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
633                         ~ETH_LINK_SPEED_AUTONEG);
634
635         hw->mac.get_link_status = true;
636
637         if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
638                 return rte_eth_linkstatus_set(dev, &link);
639
640         /* don't wait for completion if the LSC interrupt is enabled or no wait was requested */
641         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
642                 wait = 0;
643
644         err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
645         if (err != 0) {
646                 link.link_speed = ETH_SPEED_NUM_NONE;
647                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
648                 return rte_eth_linkstatus_set(dev, &link);
649         }
650
651         if (!link_up)
652                 return rte_eth_linkstatus_set(dev, &link);
653
654         intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
655         link.link_status = ETH_LINK_UP;
656         link.link_duplex = ETH_LINK_FULL_DUPLEX;
657
658         switch (link_speed) {
659         default:
660         case NGBE_LINK_SPEED_UNKNOWN:
661                 link.link_speed = ETH_SPEED_NUM_NONE;
662                 break;
663
664         case NGBE_LINK_SPEED_10M_FULL:
665                 link.link_speed = ETH_SPEED_NUM_10M;
666                 lan_speed = 0;
667                 break;
668
669         case NGBE_LINK_SPEED_100M_FULL:
670                 link.link_speed = ETH_SPEED_NUM_100M;
671                 lan_speed = 1;
672                 break;
673
674         case NGBE_LINK_SPEED_1GB_FULL:
675                 link.link_speed = ETH_SPEED_NUM_1G;
676                 lan_speed = 2;
677                 break;
678         }
679
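        /*
         * On the PF, mirror the resolved speed into the LAN speed field and
         * enable the MAC transmitter; the Tx configuration appears to use a
         * single 1G setting for all of the supported speeds.
         */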
680         if (hw->is_pf) {
681                 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
682                 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
683                                 NGBE_LINK_SPEED_100M_FULL |
684                                 NGBE_LINK_SPEED_10M_FULL)) {
685                         wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
686                                 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
687                 }
688         }
689
690         return rte_eth_linkstatus_set(dev, &link);
691 }
692
693 static int
694 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
695 {
696         return ngbe_dev_link_update_share(dev, wait_to_complete);
697 }
698
699 /**
700  * It clears the interrupt causes and enables the interrupt.
701  * It is called only once during NIC initialization.
702  *
703  * @param dev
704  *  Pointer to struct rte_eth_dev.
705  * @param on
706  *  Enable or Disable.
707  *
708  * @return
709  *  - On success, zero.
710  *  - On failure, a negative value.
711  */
712 static int
713 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
714 {
715         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
716
717         ngbe_dev_link_status_print(dev);
718         if (on != 0) {
719                 intr->mask_misc |= NGBE_ICRMISC_PHY;
720                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
721         } else {
722                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
723                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
724         }
725
726         return 0;
727 }
728
729 /**
730  * It clears the interrupt causes and enables the interrupt.
731  * It is called only once during NIC initialization.
732  *
733  * @param dev
734  *  Pointer to struct rte_eth_dev.
735  *
736  * @return
737  *  - On success, zero.
738  *  - On failure, a negative value.
739  */
740 static int
741 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
742 {
743         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
744         u64 mask;
745
746         mask = NGBE_ICR_MASK;
747         mask &= (1ULL << NGBE_MISC_VEC_ID);
748         intr->mask |= mask;
749         intr->mask_misc |= NGBE_ICRMISC_GPIO;
750
751         return 0;
752 }
753
754 /**
755  * It clears the interrupt causes and enables the interrupt.
756  * It is called only once during NIC initialization.
757  *
758  * @param dev
759  *  Pointer to struct rte_eth_dev.
760  *
761  * @return
762  *  - On success, zero.
763  *  - On failure, a negative value.
764  */
765 static int
766 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
767 {
768         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
769         u64 mask;
770
771         mask = NGBE_ICR_MASK;
772         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
773         intr->mask |= mask;
774
775         return 0;
776 }
777
778 /**
779  * It clears the interrupt causes and enables the interrupt.
780  * It is called only once during NIC initialization.
781  *
782  * @param dev
783  *  Pointer to struct rte_eth_dev.
784  *
785  * @return
786  *  - On success, zero.
787  *  - On failure, a negative value.
788  */
789 static int
790 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
791 {
792         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
793
794         intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
795
796         return 0;
797 }
798
799 /*
800  * It reads the ICR and sets the flag for link_update.
801  *
802  * @param dev
803  *  Pointer to struct rte_eth_dev.
804  *
805  * @return
806  *  - On success, zero.
807  *  - On failure, a negative value.
808  */
809 static int
810 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
811 {
812         uint32_t eicr;
813         struct ngbe_hw *hw = ngbe_dev_hw(dev);
814         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
815
816         /* clear all cause mask */
817         ngbe_disable_intr(hw);
818
819         /* read-on-clear nic registers here */
820         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
821         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
822
823         intr->flags = 0;
824
825         /* set flag for async link update */
826         if (eicr & NGBE_ICRMISC_PHY)
827                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
828
829         if (eicr & NGBE_ICRMISC_VFMBX)
830                 intr->flags |= NGBE_FLAG_MAILBOX;
831
832         if (eicr & NGBE_ICRMISC_LNKSEC)
833                 intr->flags |= NGBE_FLAG_MACSEC;
834
835         if (eicr & NGBE_ICRMISC_GPIO)
836                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
837
838         return 0;
839 }
840
841 /**
842  * It gets and then prints the link status.
843  *
844  * @param dev
845  *  Pointer to struct rte_eth_dev.
850  */
851 static void
852 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
853 {
854         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
855         struct rte_eth_link link;
856
857         rte_eth_linkstatus_get(dev, &link);
858
859         if (link.link_status == ETH_LINK_UP) {
860                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
861                                         (int)(dev->data->port_id),
862                                         (unsigned int)link.link_speed,
863                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
864                                         "full-duplex" : "half-duplex");
865         } else {
866                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
867                                 (int)(dev->data->port_id));
868         }
869         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
870                                 pci_dev->addr.domain,
871                                 pci_dev->addr.bus,
872                                 pci_dev->addr.devid,
873                                 pci_dev->addr.function);
874 }
875
876 /*
877  * It executes link_update after an interrupt has occurred.
878  *
879  * @param dev
880  *  Pointer to struct rte_eth_dev.
881  *
882  * @return
883  *  - On success, zero.
884  *  - On failure, a negative value.
885  */
886 static int
887 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
888 {
889         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
890         int64_t timeout;
891
892         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
893
894         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
895                 struct rte_eth_link link;
896
897                 /* get the link status before the link update, for later prediction */
898                 rte_eth_linkstatus_get(dev, &link);
899
900                 ngbe_dev_link_update(dev, 0);
901
902                 /* link is likely to come up */
903                 if (link.link_status != ETH_LINK_UP)
904                         /* handle it 1 sec later, waiting for it to become stable */
905                         timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
906                 /* link is likely to go down */
907                 else
908                         /* handle it 4 sec later, waiting for it to become stable */
909                         timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
910
911                 ngbe_dev_link_status_print(dev);
912                 if (rte_eal_alarm_set(timeout * 1000,
913                                       ngbe_dev_interrupt_delayed_handler,
914                                       (void *)dev) < 0) {
915                         PMD_DRV_LOG(ERR, "Error setting alarm");
916                 } else {
917                         /* remember original mask */
918                         intr->mask_misc_orig = intr->mask_misc;
919                         /* only disable lsc interrupt */
920                         intr->mask_misc &= ~NGBE_ICRMISC_PHY;
921
922                         intr->mask_orig = intr->mask;
923                         /* only disable all misc interrupts */
924                         intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
925                 }
926         }
927
928         PMD_DRV_LOG(DEBUG, "enable intr immediately");
929         ngbe_enable_intr(dev);
930
931         return 0;
932 }
933
934 /**
935  * Interrupt handler registered as an alarm callback for delayed handling of a
936  * specific interrupt, waiting for a stable NIC state. Because the ngbe
937  * interrupt state is not stable right after the link goes down, it needs
938  * to wait 4 seconds for the status to become stable.
939  *
940  * @param param
941  *  The address of parameter (struct rte_eth_dev *) registered before.
942  */
943 static void
944 ngbe_dev_interrupt_delayed_handler(void *param)
945 {
946         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
947         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
948         struct ngbe_hw *hw = ngbe_dev_hw(dev);
949         uint32_t eicr;
950
951         ngbe_disable_intr(hw);
952
953         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
954
955         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
956                 ngbe_dev_link_update(dev, 0);
957                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
958                 ngbe_dev_link_status_print(dev);
959                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
960                                               NULL);
961         }
962
963         if (intr->flags & NGBE_FLAG_MACSEC) {
964                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
965                                               NULL);
966                 intr->flags &= ~NGBE_FLAG_MACSEC;
967         }
968
969         /* restore original mask */
970         intr->mask_misc = intr->mask_misc_orig;
971         intr->mask_misc_orig = 0;
972         intr->mask = intr->mask_orig;
973         intr->mask_orig = 0;
974
975         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
976         ngbe_enable_intr(dev);
977 }
978
979 /**
980  * Interrupt handler triggered by the NIC for handling a
981  * specific interrupt.
982  *
983  * @param param
984  *  The address of parameter (struct rte_eth_dev *) registered before.
985  */
986 static void
987 ngbe_dev_interrupt_handler(void *param)
988 {
989         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
990
991         ngbe_dev_interrupt_get_status(dev);
992         ngbe_dev_interrupt_action(dev);
993 }
994
995 /**
996  * Set the IVAR registers, mapping interrupt causes to vectors
997  * @param hw
998  *  pointer to ngbe_hw struct
999  * @param direction
1000  *  0 for Rx, 1 for Tx, -1 for other causes
1001  * @param queue
1002  *  queue to map the corresponding interrupt to
1003  * @param msix_vector
1004  *  the vector to map to the corresponding queue
1005  */
1006 void
1007 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
1008                    uint8_t queue, uint8_t msix_vector)
1009 {
1010         uint32_t tmp, idx;
1011
1012         if (direction == -1) {
1013                 /* other causes */
1014                 msix_vector |= NGBE_IVARMISC_VLD;
1015                 idx = 0;
1016                 tmp = rd32(hw, NGBE_IVARMISC);
1017                 tmp &= ~(0xFF << idx);
1018                 tmp |= (msix_vector << idx);
1019                 wr32(hw, NGBE_IVARMISC, tmp);
1020         } else {
1021                 /* rx or tx causes */
1022                 /* Workaround for lost ICR */
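                /*
                 * Each 32-bit IVAR register holds four 8-bit entries covering
                 * Rx and Tx for two queues: queue >> 1 selects the register,
                 * and the byte offset below is derived from the queue parity
                 * and the direction.
                 */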
1023                 idx = ((16 * (queue & 1)) + (8 * direction));
1024                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
1025                 tmp &= ~(0xFF << idx);
1026                 tmp |= (msix_vector << idx);
1027                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
1028         }
1029 }
1030
1031 /**
1032  * Sets up the hardware to properly generate MSI-X interrupts
1033  * @param dev
1034  *  Pointer to struct rte_eth_dev.
1035  */
1036 static void
1037 ngbe_configure_msix(struct rte_eth_dev *dev)
1038 {
1039         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1040         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1041         struct ngbe_hw *hw = ngbe_dev_hw(dev);
1042         uint32_t queue_id, base = NGBE_MISC_VEC_ID;
1043         uint32_t vec = NGBE_MISC_VEC_ID;
1044         uint32_t gpie;
1045
1046         /*
1047          * Don't configure the MSI-X registers if no mapping has been done
1048          * between interrupt vectors and event fds;
1049          * but if MSI-X has already been enabled, we still need to configure
1050          * auto-clean, auto-mask and throttling.
1051          */
1052         gpie = rd32(hw, NGBE_GPIE);
1053         if (!rte_intr_dp_is_en(intr_handle) &&
1054             !(gpie & NGBE_GPIE_MSIX))
1055                 return;
1056
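        /*
         * When non-Rx interrupts are allowed, vector 0 stays reserved for the
         * misc causes and the Rx queue vectors start at NGBE_RX_VEC_START.
         */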
1057         if (rte_intr_allow_others(intr_handle)) {
1058                 base = NGBE_RX_VEC_START;
1059                 vec = base;
1060         }
1061
1062         /* setup GPIE for MSI-X mode */
1063         gpie = rd32(hw, NGBE_GPIE);
1064         gpie |= NGBE_GPIE_MSIX;
1065         wr32(hw, NGBE_GPIE, gpie);
1066
1067         /* Populate the IVAR table and set the ITR values to the
1068          * corresponding register.
1069          */
1070         if (rte_intr_dp_is_en(intr_handle)) {
1071                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
1072                         queue_id++) {
1073                         /* by default, 1:1 mapping */
1074                         ngbe_set_ivar_map(hw, 0, queue_id, vec);
1075                         intr_handle->intr_vec[queue_id] = vec;
1076                         if (vec < base + intr_handle->nb_efd - 1)
1077                                 vec++;
1078                 }
1079
1080                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
1081         }
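        /*
         * Program the default interrupt throttling interval for the misc
         * vector.
         */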
1082         wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
1083                         NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
1084                         | NGBE_ITR_WRDSA);
1085 }
1086
1087 static const struct eth_dev_ops ngbe_eth_dev_ops = {
1088         .dev_configure              = ngbe_dev_configure,
1089         .dev_infos_get              = ngbe_dev_info_get,
1090         .dev_start                  = ngbe_dev_start,
1091         .dev_stop                   = ngbe_dev_stop,
1092         .link_update                = ngbe_dev_link_update,
1093         .rx_queue_start             = ngbe_dev_rx_queue_start,
1094         .rx_queue_stop              = ngbe_dev_rx_queue_stop,
1095         .tx_queue_start             = ngbe_dev_tx_queue_start,
1096         .tx_queue_stop              = ngbe_dev_tx_queue_stop,
1097         .rx_queue_setup             = ngbe_dev_rx_queue_setup,
1098         .rx_queue_release           = ngbe_dev_rx_queue_release,
1099         .tx_queue_setup             = ngbe_dev_tx_queue_setup,
1100         .tx_queue_release           = ngbe_dev_tx_queue_release,
1101 };
1102
1103 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
1104 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
1105 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
1106
1107 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
1108 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
1109
1110 #ifdef RTE_ETHDEV_DEBUG_RX
1111         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
1112 #endif
1113 #ifdef RTE_ETHDEV_DEBUG_TX
1114         RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
1115 #endif