1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
10 #include <rte_alarm.h>
12 #include "ngbe_logs.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
17 static int ngbe_dev_close(struct rte_eth_dev *dev);
19 static void ngbe_dev_interrupt_handler(void *param);
20 static void ngbe_dev_interrupt_delayed_handler(void *param);
23 * The set of PCI devices this driver supports
25 static const struct rte_pci_id pci_id_ngbe_map[] = {
26 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
27 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
28 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
29 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
30 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
31 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
32 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
33 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
34 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
35 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
36 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
37 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
38 { .vendor_id = 0, /* sentinel */ },
41 static const struct rte_eth_desc_lim rx_desc_lim = {
42 .nb_max = NGBE_RING_DESC_MAX,
43 .nb_min = NGBE_RING_DESC_MIN,
44 .nb_align = NGBE_RXD_ALIGN,
47 static const struct eth_dev_ops ngbe_eth_dev_ops;
50 ngbe_enable_intr(struct rte_eth_dev *dev)
52 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
53 struct ngbe_hw *hw = ngbe_dev_hw(dev);
55 wr32(hw, NGBE_IENMISC, intr->mask_misc);
56 wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
61 ngbe_disable_intr(struct ngbe_hw *hw)
63 PMD_INIT_FUNC_TRACE();
65 wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
70 * Ensure that all locks are released before first NVM or PHY access
73 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
78 * These ones are more tricky since they are common to all ports; but
79 * swfw_sync retries last long enough (1s) to be almost sure that if
80 * lock can not be taken it is due to an improper lock of the
83 mask = NGBE_MNGSEM_SWPHY |
86 if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
87 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
89 hw->mac.release_swfw_sync(hw, mask);
93 eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
95 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
96 struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
97 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
98 const struct rte_memzone *mz;
102 PMD_INIT_FUNC_TRACE();
104 eth_dev->dev_ops = &ngbe_eth_dev_ops;
106 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
109 rte_eth_copy_pci_info(eth_dev, pci_dev);
111 /* Vendor and Device ID need to be set before init of shared code */
112 hw->device_id = pci_dev->id.device_id;
113 hw->vendor_id = pci_dev->id.vendor_id;
114 hw->sub_system_id = pci_dev->id.subsystem_device_id;
115 ngbe_map_device_id(hw);
116 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
118 /* Reserve memory for interrupt status block */
119 mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
120 NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
124 hw->isb_dma = TMZ_PADDR(mz);
125 hw->isb_mem = TMZ_VADDR(mz);
127 /* Initialize the shared code (base driver) */
128 err = ngbe_init_shared_code(hw);
130 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
134 /* Unlock any pending hardware semaphore */
135 ngbe_swfw_lock_reset(hw);
137 err = hw->rom.init_params(hw);
139 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
143 /* Make sure we have a good EEPROM before we read from it */
144 err = hw->rom.validate_checksum(hw, NULL);
146 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
150 err = hw->mac.init_hw(hw);
152 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
156 /* disable interrupt */
157 ngbe_disable_intr(hw);
159 /* Allocate memory for storing MAC addresses */
160 eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
161 hw->mac.num_rar_entries, 0);
162 if (eth_dev->data->mac_addrs == NULL) {
164 "Failed to allocate %u bytes needed to store MAC addresses",
165 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
169 /* Copy the permanent MAC address */
170 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
171 ð_dev->data->mac_addrs[0]);
173 /* Allocate memory for storing hash filter MAC addresses */
174 eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
175 RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
176 if (eth_dev->data->hash_mac_addrs == NULL) {
178 "Failed to allocate %d bytes needed to store MAC addresses",
179 RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
180 rte_free(eth_dev->data->mac_addrs);
181 eth_dev->data->mac_addrs = NULL;
185 ctrl_ext = rd32(hw, NGBE_PORTCTL);
186 /* let hardware know driver is loaded */
187 ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
188 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
189 ctrl_ext |= NGBE_PORTCTL_RSTDONE;
190 wr32(hw, NGBE_PORTCTL, ctrl_ext);
193 rte_intr_callback_register(intr_handle,
194 ngbe_dev_interrupt_handler, eth_dev);
196 /* enable uio/vfio intr/eventfd mapping */
197 rte_intr_enable(intr_handle);
199 /* enable support intr */
200 ngbe_enable_intr(eth_dev);
206 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
208 PMD_INIT_FUNC_TRACE();
210 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
213 ngbe_dev_close(eth_dev);
219 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
220 struct rte_pci_device *pci_dev)
222 return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
223 sizeof(struct ngbe_adapter),
224 eth_dev_pci_specific_init, pci_dev,
225 eth_ngbe_dev_init, NULL);
228 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
230 struct rte_eth_dev *ethdev;
232 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
236 return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
239 static struct rte_pci_driver rte_ngbe_pmd = {
240 .id_table = pci_id_ngbe_map,
241 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
242 RTE_PCI_DRV_INTR_LSC,
243 .probe = eth_ngbe_pci_probe,
244 .remove = eth_ngbe_pci_remove,
248 ngbe_dev_configure(struct rte_eth_dev *dev)
250 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
251 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
253 PMD_INIT_FUNC_TRACE();
255 /* set flag to update link status after init */
256 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
259 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
260 * allocation Rx preconditions we will reset it.
262 adapter->rx_bulk_alloc_allowed = true;
268 * Reset and stop device.
271 ngbe_dev_close(struct rte_eth_dev *dev)
273 PMD_INIT_FUNC_TRACE();
281 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
283 struct ngbe_hw *hw = ngbe_dev_hw(dev);
285 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
287 dev_info->default_rxconf = (struct rte_eth_rxconf) {
289 .pthresh = NGBE_DEFAULT_RX_PTHRESH,
290 .hthresh = NGBE_DEFAULT_RX_HTHRESH,
291 .wthresh = NGBE_DEFAULT_RX_WTHRESH,
293 .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
298 dev_info->rx_desc_lim = rx_desc_lim;
300 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
303 /* Driver-preferred Rx/Tx parameters */
304 dev_info->default_rxportconf.nb_queues = 1;
305 dev_info->default_rxportconf.ring_size = 256;
310 /* return 0 means link status changed, -1 means not changed */
312 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
313 int wait_to_complete)
315 struct ngbe_hw *hw = ngbe_dev_hw(dev);
316 struct rte_eth_link link;
317 u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
319 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
324 memset(&link, 0, sizeof(link));
325 link.link_status = ETH_LINK_DOWN;
326 link.link_speed = ETH_SPEED_NUM_NONE;
327 link.link_duplex = ETH_LINK_HALF_DUPLEX;
328 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
329 ~ETH_LINK_SPEED_AUTONEG);
331 hw->mac.get_link_status = true;
333 if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
334 return rte_eth_linkstatus_set(dev, &link);
336 /* check if it needs to wait to complete, if lsc interrupt is enabled */
337 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
340 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
342 link.link_speed = ETH_SPEED_NUM_NONE;
343 link.link_duplex = ETH_LINK_FULL_DUPLEX;
344 return rte_eth_linkstatus_set(dev, &link);
348 return rte_eth_linkstatus_set(dev, &link);
350 intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
351 link.link_status = ETH_LINK_UP;
352 link.link_duplex = ETH_LINK_FULL_DUPLEX;
354 switch (link_speed) {
356 case NGBE_LINK_SPEED_UNKNOWN:
357 link.link_speed = ETH_SPEED_NUM_NONE;
360 case NGBE_LINK_SPEED_10M_FULL:
361 link.link_speed = ETH_SPEED_NUM_10M;
365 case NGBE_LINK_SPEED_100M_FULL:
366 link.link_speed = ETH_SPEED_NUM_100M;
370 case NGBE_LINK_SPEED_1GB_FULL:
371 link.link_speed = ETH_SPEED_NUM_1G;
377 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
378 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
379 NGBE_LINK_SPEED_100M_FULL |
380 NGBE_LINK_SPEED_10M_FULL)) {
381 wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
382 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
386 return rte_eth_linkstatus_set(dev, &link);
/* link_update callback: thin wrapper over the shared implementation */
static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return ngbe_dev_link_update_share(dev, wait_to_complete);
}
396 * It reads ICR and sets flag for the link_update.
399 * Pointer to struct rte_eth_dev.
402 * - On success, zero.
403 * - On failure, a negative value.
406 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
409 struct ngbe_hw *hw = ngbe_dev_hw(dev);
410 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
412 /* clear all cause mask */
413 ngbe_disable_intr(hw);
415 /* read-on-clear nic registers here */
416 eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
417 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
421 /* set flag for async link update */
422 if (eicr & NGBE_ICRMISC_PHY)
423 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
425 if (eicr & NGBE_ICRMISC_VFMBX)
426 intr->flags |= NGBE_FLAG_MAILBOX;
428 if (eicr & NGBE_ICRMISC_LNKSEC)
429 intr->flags |= NGBE_FLAG_MACSEC;
431 if (eicr & NGBE_ICRMISC_GPIO)
432 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
438 * It gets and then prints the link status.
441 * Pointer to struct rte_eth_dev.
444 * - On success, zero.
445 * - On failure, a negative value.
448 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
450 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
451 struct rte_eth_link link;
453 rte_eth_linkstatus_get(dev, &link);
455 if (link.link_status == ETH_LINK_UP) {
456 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
457 (int)(dev->data->port_id),
458 (unsigned int)link.link_speed,
459 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
460 "full-duplex" : "half-duplex");
462 PMD_INIT_LOG(INFO, " Port %d: Link Down",
463 (int)(dev->data->port_id));
465 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
466 pci_dev->addr.domain,
469 pci_dev->addr.function);
473 * It executes link_update after knowing an interrupt occurred.
476 * Pointer to struct rte_eth_dev.
479 * - On success, zero.
480 * - On failure, a negative value.
483 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
485 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
488 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
490 if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
491 struct rte_eth_link link;
493 /*get the link status before link update, for predicting later*/
494 rte_eth_linkstatus_get(dev, &link);
496 ngbe_dev_link_update(dev, 0);
499 if (link.link_status != ETH_LINK_UP)
500 /* handle it 1 sec later, wait it being stable */
501 timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
504 /* handle it 4 sec later, wait it being stable */
505 timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
507 ngbe_dev_link_status_print(dev);
508 if (rte_eal_alarm_set(timeout * 1000,
509 ngbe_dev_interrupt_delayed_handler,
511 PMD_DRV_LOG(ERR, "Error setting alarm");
513 /* remember original mask */
514 intr->mask_misc_orig = intr->mask_misc;
515 /* only disable lsc interrupt */
516 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
518 intr->mask_orig = intr->mask;
519 /* only disable all misc interrupts */
520 intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
524 PMD_DRV_LOG(DEBUG, "enable intr immediately");
525 ngbe_enable_intr(dev);
531 * Interrupt handler which shall be registered for alarm callback for delayed
532 * handling specific interrupt to wait for the stable nic state. As the
533 * NIC interrupt state is not stable for ngbe after link is just down,
534 * it needs to wait 4 seconds to get the stable status.
537 * The address of parameter (struct rte_eth_dev *) registered before.
540 ngbe_dev_interrupt_delayed_handler(void *param)
542 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
543 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
544 struct ngbe_hw *hw = ngbe_dev_hw(dev);
547 ngbe_disable_intr(hw);
549 eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
551 if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
552 ngbe_dev_link_update(dev, 0);
553 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
554 ngbe_dev_link_status_print(dev);
555 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
559 if (intr->flags & NGBE_FLAG_MACSEC) {
560 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
562 intr->flags &= ~NGBE_FLAG_MACSEC;
565 /* restore original mask */
566 intr->mask_misc = intr->mask_misc_orig;
567 intr->mask_misc_orig = 0;
568 intr->mask = intr->mask_orig;
571 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
572 ngbe_enable_intr(dev);
/*
 * Interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);
}
591 static const struct eth_dev_ops ngbe_eth_dev_ops = {
592 .dev_configure = ngbe_dev_configure,
593 .dev_infos_get = ngbe_dev_info_get,
594 .link_update = ngbe_dev_link_update,
595 .rx_queue_setup = ngbe_dev_rx_queue_setup,
596 .rx_queue_release = ngbe_dev_rx_queue_release,
599 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
600 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
601 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
603 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
604 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
606 #ifdef RTE_ETHDEV_DEBUG_RX
607 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
609 #ifdef RTE_ETHDEV_DEBUG_TX
610 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);