3eeda53b72ed9e44a0aadae6e54b251618bfb70a
[dpdk.git] / drivers / net / ngbe / ngbe_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5
6 #include <errno.h>
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
9
10 #include <rte_alarm.h>
11
12 #include "ngbe_logs.h"
13 #include "ngbe.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
16
17 static int ngbe_dev_close(struct rte_eth_dev *dev);
18 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
19                                 int wait_to_complete);
20
21 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
22 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
23 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
24 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
25 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
26 static void ngbe_dev_interrupt_handler(void *param);
27 static void ngbe_dev_interrupt_delayed_handler(void *param);
28 static void ngbe_configure_msix(struct rte_eth_dev *dev);
29
/*
 * The set of PCI devices this driver supports.
 * All entries share the WangXun vendor ID; the list covers the WX1860
 * EM-family variants (A/AL, 1/2/4-port, NCSI and _W flavors) and is
 * terminated by the zero-vendor sentinel required by the PCI bus layer.
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};
48
/* Rx descriptor ring limits reported via dev_info_get(). */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};
54
/* Tx descriptor ring limits reported via dev_info_get(); also bounds
 * the number of segments per packet/mbuf chain.
 */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};
62
63 static const struct eth_dev_ops ngbe_eth_dev_ops;
64
/*
 * Reset the PF via the base-driver hook, then flag reset completion in
 * PORTCTL so PF/VF mailbox operations become usable again.
 *
 * Returns the base driver's reset status, except that an absent SFP
 * module (NGBE_ERR_SFP_NOT_PRESENT) is deliberately not treated as an
 * error.
 */
static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	/* posted-write flush so RSTDONE is visible before callers proceed */
	ngbe_flush(hw);

	/* a missing pluggable module is not fatal for a PF reset */
	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}
83
/*
 * Re-arm interrupts from the software copies of the masks: the misc
 * cause mask goes to IENMISC and the low 32 bits of the vector mask are
 * written to IMC(0) (interrupt mask clear, i.e. unmask).
 */
static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	/* flush posted writes so the unmask takes effect immediately */
	ngbe_flush(hw);
}
94
/*
 * Mask all device interrupts by setting every bit in IMS(0)
 * (interrupt mask set). The software mask copies are left untouched so
 * ngbe_enable_intr() can restore the previous state.
 */
static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}
103
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	/* Acquire-then-release clears any stale lock: if acquire fails the
	 * semaphore was left held (e.g. by a crashed process) and the
	 * unconditional release below frees it either way.
	 */
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}
126
/*
 * Per-port init callback for rte_eth_dev_create().
 *
 * Maps the device BAR, reserves the DMA'd interrupt status block,
 * brings up the base driver (shared code, EEPROM, MAC), allocates the
 * unicast and hash MAC address tables, marks the driver loaded in
 * PORTCTL, and registers/enables the misc interrupt handler.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO on any
 * base-driver/EEPROM/hardware init failure.
 */
static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;

	/* secondary processes share the primary's initialized state */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	/* the NIC DMAs interrupt causes into this block; keep both views */
	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		/* undo the unicast table allocation above */
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}
246
/*
 * Per-port teardown callback for rte_eth_dev_destroy().
 *
 * NOTE(review): this currently returns -EINVAL unconditionally, which
 * mirrors ngbe_dev_close() being an unimplemented stub at this point in
 * the driver's development — presumably both will return 0 once close
 * is implemented; confirm against later revisions.
 */
static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	/* only the primary process owns the hardware state */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return -EINVAL;
}
259
/*
 * PCI probe callback: allocate an ethdev carrying a struct ngbe_adapter
 * as private data and run eth_ngbe_dev_init() on it.
 */
static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}
269
270 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
271 {
272         struct rte_eth_dev *ethdev;
273
274         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
275         if (ethdev == NULL)
276                 return 0;
277
278         return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
279 }
280
/* PCI driver registration: requires BAR mapping and advertises
 * link-status-change interrupt support.
 */
static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};
288
289 static int
290 ngbe_dev_configure(struct rte_eth_dev *dev)
291 {
292         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
293         struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
294
295         PMD_INIT_FUNC_TRACE();
296
297         /* set flag to update link status after init */
298         intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
299
300         /*
301          * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
302          * allocation Rx preconditions we will reset it.
303          */
304         adapter->rx_bulk_alloc_allowed = true;
305
306         return 0;
307 }
308
/*
 * Program the GPIO block so PHY events raise the GPIO misc-interrupt
 * cause, and enable that cause in the software misc mask.
 */
static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	/* the yt8521s SFI PHY signals with opposite polarity */
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}
325
326 /*
327  * Configure device link speed and setup link.
328  * It returns 0 on success.
329  */
330 static int
331 ngbe_dev_start(struct rte_eth_dev *dev)
332 {
333         struct ngbe_hw *hw = ngbe_dev_hw(dev);
334         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
335         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
336         uint32_t intr_vector = 0;
337         int err;
338         bool link_up = false, negotiate = false;
339         uint32_t speed = 0;
340         uint32_t allowed_speeds = 0;
341         int status;
342         uint32_t *link_speeds;
343
344         PMD_INIT_FUNC_TRACE();
345
346         /* disable uio/vfio intr/eventfd mapping */
347         rte_intr_disable(intr_handle);
348
349         /* stop adapter */
350         hw->adapter_stopped = 0;
351         ngbe_stop_hw(hw);
352
353         /* reinitialize adapter, this calls reset and start */
354         hw->nb_rx_queues = dev->data->nb_rx_queues;
355         hw->nb_tx_queues = dev->data->nb_tx_queues;
356         status = ngbe_pf_reset_hw(hw);
357         if (status != 0)
358                 return -1;
359         hw->mac.start_hw(hw);
360         hw->mac.get_link_status = true;
361
362         ngbe_dev_phy_intr_setup(dev);
363
364         /* check and configure queue intr-vector mapping */
365         if ((rte_intr_cap_multiple(intr_handle) ||
366              !RTE_ETH_DEV_SRIOV(dev).active) &&
367             dev->data->dev_conf.intr_conf.rxq != 0) {
368                 intr_vector = dev->data->nb_rx_queues;
369                 if (rte_intr_efd_enable(intr_handle, intr_vector))
370                         return -1;
371         }
372
373         if (rte_intr_dp_is_en(intr_handle) && intr_handle->intr_vec == NULL) {
374                 intr_handle->intr_vec =
375                         rte_zmalloc("intr_vec",
376                                     dev->data->nb_rx_queues * sizeof(int), 0);
377                 if (intr_handle->intr_vec == NULL) {
378                         PMD_INIT_LOG(ERR,
379                                      "Failed to allocate %d rx_queues intr_vec",
380                                      dev->data->nb_rx_queues);
381                         return -ENOMEM;
382                 }
383         }
384
385         /* confiugre MSI-X for sleep until Rx interrupt */
386         ngbe_configure_msix(dev);
387
388         /* initialize transmission unit */
389         ngbe_dev_tx_init(dev);
390
391         /* This can fail when allocating mbufs for descriptor rings */
392         err = ngbe_dev_rx_init(dev);
393         if (err != 0) {
394                 PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
395                 goto error;
396         }
397
398         err = ngbe_dev_rxtx_start(dev);
399         if (err < 0) {
400                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
401                 goto error;
402         }
403
404         err = hw->mac.check_link(hw, &speed, &link_up, 0);
405         if (err != 0)
406                 goto error;
407         dev->data->dev_link.link_status = link_up;
408
409         link_speeds = &dev->data->dev_conf.link_speeds;
410         if (*link_speeds == ETH_LINK_SPEED_AUTONEG)
411                 negotiate = true;
412
413         err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
414         if (err != 0)
415                 goto error;
416
417         allowed_speeds = 0;
418         if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
419                 allowed_speeds |= ETH_LINK_SPEED_1G;
420         if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
421                 allowed_speeds |= ETH_LINK_SPEED_100M;
422         if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
423                 allowed_speeds |= ETH_LINK_SPEED_10M;
424
425         if (*link_speeds & ~allowed_speeds) {
426                 PMD_INIT_LOG(ERR, "Invalid link setting");
427                 goto error;
428         }
429
430         speed = 0x0;
431         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
432                 speed = hw->mac.default_speeds;
433         } else {
434                 if (*link_speeds & ETH_LINK_SPEED_1G)
435                         speed |= NGBE_LINK_SPEED_1GB_FULL;
436                 if (*link_speeds & ETH_LINK_SPEED_100M)
437                         speed |= NGBE_LINK_SPEED_100M_FULL;
438                 if (*link_speeds & ETH_LINK_SPEED_10M)
439                         speed |= NGBE_LINK_SPEED_10M_FULL;
440         }
441
442         hw->phy.init_hw(hw);
443         err = hw->mac.setup_link(hw, speed, link_up);
444         if (err != 0)
445                 goto error;
446
447         if (rte_intr_allow_others(intr_handle)) {
448                 ngbe_dev_misc_interrupt_setup(dev);
449                 /* check if lsc interrupt is enabled */
450                 if (dev->data->dev_conf.intr_conf.lsc != 0)
451                         ngbe_dev_lsc_interrupt_setup(dev, TRUE);
452                 else
453                         ngbe_dev_lsc_interrupt_setup(dev, FALSE);
454                 ngbe_dev_macsec_interrupt_setup(dev);
455                 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
456         } else {
457                 rte_intr_callback_unregister(intr_handle,
458                                              ngbe_dev_interrupt_handler, dev);
459                 if (dev->data->dev_conf.intr_conf.lsc != 0)
460                         PMD_INIT_LOG(INFO,
461                                      "LSC won't enable because of no intr multiplex");
462         }
463
464         /* check if rxq interrupt is enabled */
465         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
466             rte_intr_dp_is_en(intr_handle))
467                 ngbe_dev_rxq_interrupt_setup(dev);
468
469         /* enable UIO/VFIO intr/eventfd mapping */
470         rte_intr_enable(intr_handle);
471
472         /* resume enabled intr since HW reset */
473         ngbe_enable_intr(dev);
474
475         if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
476                 (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
477                 /* gpio0 is used to power on/off control*/
478                 wr32(hw, NGBE_GPIODATA, 0);
479         }
480
481         /*
482          * Update link status right before return, because it may
483          * start link configuration process in a separate thread.
484          */
485         ngbe_dev_link_update(dev, 0);
486
487         return 0;
488
489 error:
490         PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
491         ngbe_dev_clear_queues(dev);
492         return -EIO;
493 }
494
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* already stopped: nothing to do */
	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used to power on/off control*/
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	/* presumably cleared so ngbe_stop_hw() performs a full stop after
	 * the reset — TODO confirm against base driver semantics
	 */
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	ngbe_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}
551
/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	/* NOTE(review): close is an unimplemented stub at this stage;
	 * -EINVAL propagates to eth_ngbe_dev_uninit() and signals that
	 * teardown did not complete.
	 */
	return -EINVAL;
}
564
565 static int
566 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
567 {
568         struct ngbe_hw *hw = ngbe_dev_hw(dev);
569
570         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
571         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
572         dev_info->min_rx_bufsize = 1024;
573         dev_info->max_rx_pktlen = 15872;
574
575         dev_info->default_rxconf = (struct rte_eth_rxconf) {
576                 .rx_thresh = {
577                         .pthresh = NGBE_DEFAULT_RX_PTHRESH,
578                         .hthresh = NGBE_DEFAULT_RX_HTHRESH,
579                         .wthresh = NGBE_DEFAULT_RX_WTHRESH,
580                 },
581                 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
582                 .rx_drop_en = 0,
583                 .offloads = 0,
584         };
585
586         dev_info->default_txconf = (struct rte_eth_txconf) {
587                 .tx_thresh = {
588                         .pthresh = NGBE_DEFAULT_TX_PTHRESH,
589                         .hthresh = NGBE_DEFAULT_TX_HTHRESH,
590                         .wthresh = NGBE_DEFAULT_TX_WTHRESH,
591                 },
592                 .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
593                 .offloads = 0,
594         };
595
596         dev_info->rx_desc_lim = rx_desc_lim;
597         dev_info->tx_desc_lim = tx_desc_lim;
598
599         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
600                                 ETH_LINK_SPEED_10M;
601
602         /* Driver-preferred Rx/Tx parameters */
603         dev_info->default_rxportconf.burst_size = 32;
604         dev_info->default_txportconf.burst_size = 32;
605         dev_info->default_rxportconf.nb_queues = 1;
606         dev_info->default_txportconf.nb_queues = 1;
607         dev_info->default_rxportconf.ring_size = 256;
608         dev_info->default_txportconf.ring_size = 256;
609
610         return 0;
611 }
612
/* return 0 means link status changed, -1 means not changed */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	bool link_up;
	int err;
	int wait = 1;

	/* start from a conservative "down" snapshot */
	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	/* autoneg is on unless a fixed-speed set was configured */
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			~ETH_LINK_SPEED_AUTONEG);

	hw->mac.get_link_status = true;

	/* link is not configured yet: report the "down" snapshot as-is */
	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		/* query failed: record an unknown-speed full-duplex down link */
		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (!link_up)
		return rte_eth_linkstatus_set(dev, &link);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* map the hardware speed code to ethdev speed plus the LAN-speed
	 * register encoding (0 = 10M, 1 = 100M, 2 = 1G)
	 */
	switch (link_speed) {
	default:
	case NGBE_LINK_SPEED_UNKNOWN:
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;

	case NGBE_LINK_SPEED_10M_FULL:
		link.link_speed = ETH_SPEED_NUM_10M;
		lan_speed = 0;
		break;

	case NGBE_LINK_SPEED_100M_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		lan_speed = 1;
		break;

	case NGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		lan_speed = 2;
		break;
	}

	if (hw->is_pf) {
		/* program the negotiated speed and enable the MAC Tx path */
		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
				NGBE_LINK_SPEED_100M_FULL |
				NGBE_LINK_SPEED_10M_FULL)) {
			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
		}
	}

	return rte_eth_linkstatus_set(dev, &link);
}
691
/* eth_dev_ops link_update callback: thin wrapper over the shared
 * implementation above.
 */
static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return ngbe_dev_link_update_share(dev, wait_to_complete);
}
697
698 /**
699  * It clears the interrupt causes and enables the interrupt.
700  * It will be called once only during NIC initialized.
701  *
702  * @param dev
703  *  Pointer to struct rte_eth_dev.
704  * @param on
705  *  Enable or Disable.
706  *
707  * @return
708  *  - On success, zero.
709  *  - On failure, a negative value.
710  */
711 static int
712 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
713 {
714         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
715
716         ngbe_dev_link_status_print(dev);
717         if (on != 0) {
718                 intr->mask_misc |= NGBE_ICRMISC_PHY;
719                 intr->mask_misc |= NGBE_ICRMISC_GPIO;
720         } else {
721                 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
722                 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
723         }
724
725         return 0;
726 }
727
728 /**
729  * It clears the interrupt causes and enables the interrupt.
730  * It will be called once only during NIC initialized.
731  *
732  * @param dev
733  *  Pointer to struct rte_eth_dev.
734  *
735  * @return
736  *  - On success, zero.
737  *  - On failure, a negative value.
738  */
739 static int
740 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
741 {
742         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
743         u64 mask;
744
745         mask = NGBE_ICR_MASK;
746         mask &= (1ULL << NGBE_MISC_VEC_ID);
747         intr->mask |= mask;
748         intr->mask_misc |= NGBE_ICRMISC_GPIO;
749
750         return 0;
751 }
752
753 /**
754  * It clears the interrupt causes and enables the interrupt.
755  * It will be called once only during NIC initialized.
756  *
757  * @param dev
758  *  Pointer to struct rte_eth_dev.
759  *
760  * @return
761  *  - On success, zero.
762  *  - On failure, a negative value.
763  */
764 static int
765 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
766 {
767         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
768         u64 mask;
769
770         mask = NGBE_ICR_MASK;
771         mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
772         intr->mask |= mask;
773
774         return 0;
775 }
776
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	/* unmask the link-security (MACsec) misc cause */
	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;

	return 0;
}
797
798 /*
799  * It reads ICR and sets flag for the link_update.
800  *
801  * @param dev
802  *  Pointer to struct rte_eth_dev.
803  *
804  * @return
805  *  - On success, zero.
806  *  - On failure, a negative value.
807  */
808 static int
809 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
810 {
811         uint32_t eicr;
812         struct ngbe_hw *hw = ngbe_dev_hw(dev);
813         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
814
815         /* clear all cause mask */
816         ngbe_disable_intr(hw);
817
818         /* read-on-clear nic registers here */
819         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
820         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
821
822         intr->flags = 0;
823
824         /* set flag for async link update */
825         if (eicr & NGBE_ICRMISC_PHY)
826                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
827
828         if (eicr & NGBE_ICRMISC_VFMBX)
829                 intr->flags |= NGBE_FLAG_MAILBOX;
830
831         if (eicr & NGBE_ICRMISC_LNKSEC)
832                 intr->flags |= NGBE_FLAG_MACSEC;
833
834         if (eicr & NGBE_ICRMISC_GPIO)
835                 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
836
837         return 0;
838 }
839
/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static void
ngbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	/* read the last recorded link state, no hardware access here */
	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status == ETH_LINK_UP) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
874
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	int64_t timeout;

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/*get the link status before link update, for predicting later*/
		rte_eth_linkstatus_get(dev, &link);

		ngbe_dev_link_update(dev, 0);

		/* likely to up */
		if (link.link_status != ETH_LINK_UP)
			/* handle it 1 sec later, wait it being stable */
			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait it being stable */
			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;

		ngbe_dev_link_status_print(dev);
		/* defer the final handling until the link settles; the
		 * delayed handler re-checks and restores the saved masks
		 */
		if (rte_eal_alarm_set(timeout * 1000,
				      ngbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~NGBE_ICRMISC_PHY;

			intr->mask_orig = intr->mask;
			/* only disable all misc interrupts */
			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	/* re-arm with the (possibly reduced) masks set above */
	ngbe_enable_intr(dev);

	return 0;
}
932
933 /**
934  * Interrupt handler which shall be registered for alarm callback for delayed
935  * handling specific interrupt to wait for the stable nic state. As the
936  * NIC interrupt state is not stable for ngbe after link is just down,
937  * it needs to wait 4 seconds to get the stable status.
938  *
939  * @param param
940  *  The address of parameter (struct rte_eth_dev *) registered before.
941  */
942 static void
943 ngbe_dev_interrupt_delayed_handler(void *param)
944 {
945         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
946         struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
947         struct ngbe_hw *hw = ngbe_dev_hw(dev);
948         uint32_t eicr;
949
950         ngbe_disable_intr(hw);
951
952         eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
953
954         if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
955                 ngbe_dev_link_update(dev, 0);
956                 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
957                 ngbe_dev_link_status_print(dev);
958                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
959                                               NULL);
960         }
961
962         if (intr->flags & NGBE_FLAG_MACSEC) {
963                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
964                                               NULL);
965                 intr->flags &= ~NGBE_FLAG_MACSEC;
966         }
967
968         /* restore original mask */
969         intr->mask_misc = intr->mask_misc_orig;
970         intr->mask_misc_orig = 0;
971         intr->mask = intr->mask_orig;
972         intr->mask_orig = 0;
973
974         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
975         ngbe_enable_intr(dev);
976 }
977
/**
 * Top-level interrupt service routine registered with the PCI interrupt
 * handle: latch the pending causes, then act on them.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = param;

	ngbe_dev_interrupt_get_status(eth_dev);
	ngbe_dev_interrupt_action(eth_dev);
}
993
994 /**
995  * Set the IVAR registers, mapping interrupt causes to vectors
996  * @param hw
997  *  pointer to ngbe_hw struct
998  * @direction
999  *  0 for Rx, 1 for Tx, -1 for other causes
1000  * @queue
1001  *  queue to map the corresponding interrupt to
1002  * @msix_vector
1003  *  the vector to map to the corresponding queue
1004  */
1005 void
1006 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
1007                    uint8_t queue, uint8_t msix_vector)
1008 {
1009         uint32_t tmp, idx;
1010
1011         if (direction == -1) {
1012                 /* other causes */
1013                 msix_vector |= NGBE_IVARMISC_VLD;
1014                 idx = 0;
1015                 tmp = rd32(hw, NGBE_IVARMISC);
1016                 tmp &= ~(0xFF << idx);
1017                 tmp |= (msix_vector << idx);
1018                 wr32(hw, NGBE_IVARMISC, tmp);
1019         } else {
1020                 /* rx or tx causes */
1021                 /* Workround for ICR lost */
1022                 idx = ((16 * (queue & 1)) + (8 * direction));
1023                 tmp = rd32(hw, NGBE_IVAR(queue >> 1));
1024                 tmp &= ~(0xFF << idx);
1025                 tmp |= (msix_vector << idx);
1026                 wr32(hw, NGBE_IVAR(queue >> 1), tmp);
1027         }
1028 }
1029
/**
 * Sets up the hardware to properly generate MSI-X interrupts:
 * enables MSI-X mode in GPIE, maps each Rx queue's cause to a vector
 * via the IVAR table, and programs the misc vector's throttle rate.
 * @hw
 *  board private structure
 */
static void
ngbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
	uint32_t vec = NGBE_MISC_VEC_ID;
	uint32_t gpie;

	/*
	 * Won't configure MSI-X register if no mapping is done
	 * between intr vector and event fd
	 * but if MSI-X has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, NGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & NGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		/* vector NGBE_MISC_VEC_ID stays reserved for misc causes;
		 * queue vectors start at NGBE_RX_VEC_START
		 */
		base = NGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	wr32(hw, NGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping; once the event fds are
			 * exhausted, the remaining queues share the last vector
			 */
			ngbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		/* map the misc causes (direction -1) to the misc vector */
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	}
	/* program the interrupt throttle interval for the misc vector */
	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| NGBE_ITR_WRDSA);
}
1085
/* Mapping of the generic ethdev API callbacks to their ngbe
 * implementations; registered for the device during probe.
 */
static const struct eth_dev_ops ngbe_eth_dev_ops = {
	.dev_configure              = ngbe_dev_configure,
	.dev_infos_get              = ngbe_dev_info_get,
	.dev_start                  = ngbe_dev_start,
	.dev_stop                   = ngbe_dev_stop,
	.link_update                = ngbe_dev_link_update,
	.rx_queue_start             = ngbe_dev_rx_queue_start,
	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
	.tx_queue_start             = ngbe_dev_tx_queue_start,
	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
	.rx_queue_release           = ngbe_dev_rx_queue_release,
	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
	.tx_queue_release           = ngbe_dev_tx_queue_release,
};
1101
/* Register the driver with the PCI bus, export its device-id table, and
 * declare the kernel modules it can bind against.
 */
RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");

/* Per-component log types; init and driver default to NOTICE level. */
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);

/* Data-path loggers only exist in debug builds. */
#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
#endif