/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
#include <rte_common.h>
#include <ethdev_pci.h>
#include <rte_alarm.h>
#include "ngbe_logs.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);
 * The set of PCI devices this driver supports
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
static const struct eth_dev_ops ngbe_eth_dev_ops;
ngbe_pf_reset_hw(struct ngbe_hw *hw)
	status = hw->mac.reset_hw(hw);
	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	if (status == NGBE_ERR_SFP_NOT_PRESENT)
ngbe_enable_intr(struct rte_eth_dev *dev)
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
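	/* enable the configured misc causes and clear (unmask) the per-vector
	 * interrupt bits that are set in intr->mask
	 */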
	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
ngbe_disable_intr(struct ngbe_hw *hw)
	PMD_INIT_FUNC_TRACE();
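	/* writing all ones to the interrupt mask set register masks every cause */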
	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
 * Ensure that all locks are released before first NVM or PHY access
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
	 * These are trickier since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if the
	 * lock cannot be taken it is due to an improper lock of the
	mask = NGBE_MNGSEM_SWPHY |
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
	hw->mac.release_swfw_sync(hw, mask);
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
	 * For secondary processes, we don't initialise any further as the primary
	 * process has already done this work. Only check that we don't need a
	 * different Rx and Tx function.
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* The Tx function in the primary process was set by the last queue
		 * initialized; the Tx queues may not have been initialized by the
		 * primary process yet.
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
			/* Use default Tx function if we get here */
				"No Tx queues configured yet. Using default Tx function.");
		ngbe_set_rx_function(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
			NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);
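	/* the ISB is a DMA area the NIC updates with interrupt cause data;
	 * the interrupt handlers below read it instead of a device register
	 */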
	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);
	err = hw->rom.init_params(hw);
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
	err = hw->mac.init_hw(hw);
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
	/* disable interrupt */
	ngbe_disable_intr(hw);
	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
			hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
			"Failed to allocate %u bytes needed to store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);
	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
			"Failed to allocate %d bytes needed to store MAC addresses",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	rte_intr_callback_register(intr_handle,
			ngbe_dev_interrupt_handler, eth_dev);
	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);
	/* enable supported interrupts */
	ngbe_enable_intr(eth_dev);
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
	ngbe_dev_close(eth_dev);
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
	struct rte_eth_dev *ethdev;
	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
			RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
ngbe_dev_configure(struct rte_eth_dev *dev)
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	PMD_INIT_FUNC_TRACE();
	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
	 * Initialize to TRUE. If any Rx queue fails to meet the bulk
	 * allocation preconditions, this flag will be reset.
	adapter->rx_bulk_alloc_allowed = true;
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
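	/* route PHY link events through GPIO: program direction, interrupt
	 * enable, trigger type and polarity, then allow the GPIO misc cause
	 */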
	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
	intr->mask_misc |= NGBE_ICRMISC_GPIO;
 * Configure device link speed and setup link.
 * It returns 0 on success.
ngbe_dev_start(struct rte_eth_dev *dev)
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	bool link_up = false, negotiate = false;
	uint32_t allowed_speeds = 0;
	uint32_t *link_speeds;
	PMD_INIT_FUNC_TRACE();
	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);
	hw->adapter_stopped = 0;
	/* reinitialize the adapter; this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;
	ngbe_dev_phy_intr_setup(dev);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
			!RTE_ETH_DEV_SRIOV(dev).active) &&
			dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				dev->data->nb_rx_queues)) {
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);
	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);
	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
	err = ngbe_dev_rxtx_start(dev);
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	dev->data->dev_link.link_status = link_up;
	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
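	/* translate the MAC's default speeds into ethdev speed capability flags
	 * so the requested link_speeds can be validated against them below
	 */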
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	err = hw->mac.setup_link(hw, speed, link_up);
	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
		rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
				"LSC won't be enabled because interrupt multiplexing is not available");
	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
			rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);
	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);
	/* re-enable the interrupts that were enabled before the HW reset */
	ngbe_enable_intr(dev);
	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
			(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* GPIO0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	 * Update link status right before returning, because it may
	 * start the link configuration process in a separate thread.
	ngbe_dev_link_update(dev, 0);
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
 * Stop device: disable Rx and Tx functions to allow reconfiguration.
ngbe_dev_stop(struct rte_eth_dev *dev)
	struct rte_eth_link link;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	if (hw->adapter_stopped)
	PMD_INIT_FUNC_TRACE();
	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
			(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* GPIO0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	/* disable interrupts */
	ngbe_disable_intr(hw);
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;
	ngbe_dev_clear_queues(dev);
	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
	if (!rte_intr_allow_others(intr_handle))
		/* revert to the default interrupt handler */
		rte_intr_callback_register(intr_handle,
				ngbe_dev_interrupt_handler,
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);
	hw->adapter_stopped = true;
	dev->data->dev_started = 0;
 * Reset and stop device.
ngbe_dev_close(struct rte_eth_dev *dev)
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	PMD_INIT_FUNC_TRACE();
	ngbe_pf_reset_hw(hw);
	ngbe_dev_free_queues(dev);
	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
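	/* keep retrying to unregister the interrupt callback while it is
	 * still in use (-EAGAIN), up to the retry limit below
	 */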
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
		} else if (ret != -EAGAIN) {
				"intr callback unregister failed: %d",
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;
ngbe_dev_reset(struct rte_eth_dev *dev)
	ret = eth_ngbe_dev_uninit(dev);
	ret = eth_ngbe_dev_init(dev, NULL);
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
			dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
	dev_info->default_txconf = (struct rte_eth_txconf) {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;
	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
			RTE_ETH_LINK_SPEED_10M;
	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
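	/* only the Rx burst functions listed here report packet type information */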
	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
			dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
			dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
			dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes();
/* return 0 means link status changed, -1 means not changed */
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
		int wait_to_complete)
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			~RTE_ETH_LINK_SPEED_AUTONEG);
	hw->mac.get_link_status = true;
	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);
	/* don't wait for completion if the caller didn't request it or the lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
		return rte_eth_linkstatus_set(dev, &link);
	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	switch (link_speed) {
	case NGBE_LINK_SPEED_UNKNOWN:
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	case NGBE_LINK_SPEED_10M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
	case NGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
	case NGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
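	/* mirror the resolved speed into the LAN speed field and enable the
	 * MAC transmitter once a valid speed has been negotiated
	 */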
	wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
	if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
			NGBE_LINK_SPEED_100M_FULL |
			NGBE_LINK_SPEED_10M_FULL)) {
		wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
			NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
	return rte_eth_linkstatus_set(dev, &link);
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
	return ngbe_dev_link_update_share(dev, wait_to_complete);
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *  Pointer to struct rte_eth_dev.
 *  - On success, zero.
 *  - On failure, a negative value.
ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	ngbe_dev_link_status_print(dev);
		intr->mask_misc |= NGBE_ICRMISC_PHY;
		intr->mask_misc |= NGBE_ICRMISC_GPIO;
		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *  Pointer to struct rte_eth_dev.
 *  - On success, zero.
 *  - On failure, a negative value.
ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	mask = NGBE_ICR_MASK;
	mask &= (1ULL << NGBE_MISC_VEC_ID);
	intr->mask_misc |= NGBE_ICRMISC_GPIO;
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *  Pointer to struct rte_eth_dev.
 *  - On success, zero.
 *  - On failure, a negative value.
ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	mask = NGBE_ICR_MASK;
	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *  Pointer to struct rte_eth_dev.
 *  - On success, zero.
 *  - On failure, a negative value.
ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
 * It reads the ICR and sets a flag for the link_update.
 *  Pointer to struct rte_eth_dev.
 *  - On success, zero.
 *  - On failure, a negative value.
ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	/* clear all cause mask */
	ngbe_disable_intr(hw);
	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
	/* set flag for async link update */
	if (eicr & NGBE_ICRMISC_PHY)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
	if (eicr & NGBE_ICRMISC_VFMBX)
		intr->flags |= NGBE_FLAG_MAILBOX;
	if (eicr & NGBE_ICRMISC_LNKSEC)
		intr->flags |= NGBE_FLAG_MACSEC;
	if (eicr & NGBE_ICRMISC_GPIO)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
 * It gets and then prints the link status.
 *  Pointer to struct rte_eth_dev.
 *  - On success, zero.
 *  - On failure, a negative value.
ngbe_dev_link_status_print(struct rte_eth_dev *dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status == RTE_ETH_LINK_UP) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
				(int)(dev->data->port_id),
				(unsigned int)link.link_speed,
				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			pci_dev->addr.domain,
			pci_dev->addr.function);
 * It executes link_update after an interrupt has occurred.
 *  Pointer to struct rte_eth_dev.
 *  - On success, zero.
 *  - On failure, a negative value.
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;
		/* get the link status before the update, to choose the delay below */
		rte_eth_linkstatus_get(dev, &link);
		ngbe_dev_link_update(dev, 0);
		if (link.link_status != RTE_ETH_LINK_UP)
			/* handle it 1 second later, to wait for the link to become stable */
			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
			/* handle it 4 seconds later, to wait for the link to become stable */
			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
		ngbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				ngbe_dev_interrupt_delayed_handler,
			PMD_DRV_LOG(ERR, "Error setting alarm");
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~NGBE_ICRMISC_PHY;
			intr->mask_orig = intr->mask;
			/* only disable the misc vector interrupt */
			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ngbe_enable_intr(dev);
 * Interrupt handler registered as an alarm callback for delayed handling of a
 * specific interrupt, waiting for the NIC state to become stable. Because the
 * ngbe NIC interrupt state is not stable right after the link goes down,
 * it needs to wait 4 seconds to get a stable status.
 *  The address of parameter (struct rte_eth_dev *) registered before.
ngbe_dev_interrupt_delayed_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	ngbe_disable_intr(hw);
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		ngbe_dev_link_update(dev, 0);
		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
		ngbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
	if (intr->flags & NGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
		intr->flags &= ~NGBE_FLAG_MACSEC;
	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;
	intr->mask = intr->mask_orig;
	intr->mask_orig = 0;
	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ngbe_enable_intr(dev);
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *  The address of parameter (struct rte_eth_dev *) registered before.
ngbe_dev_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);
 * Set the IVAR registers, mapping interrupt causes to vectors
 *  pointer to ngbe_hw struct
 *  0 for Rx, 1 for Tx, -1 for other causes
 *  queue to map the corresponding interrupt to
 *  the vector to map to the corresponding queue
ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
		uint8_t queue, uint8_t msix_vector)
	if (direction == -1) {
		msix_vector |= NGBE_IVARMISC_VLD;
		tmp = rd32(hw, NGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVARMISC, tmp);
		/* Rx or Tx causes */
		/* Workaround for ICR lost */
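		/* each 32-bit IVAR register holds four 8-bit entries (Rx and Tx
		 * vectors for two queues); idx selects the byte lane to update
		 */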
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
 * Sets up the hardware to properly generate MSI-X interrupts
 *  board private structure
ngbe_configure_msix(struct rte_eth_dev *dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
	uint32_t vec = NGBE_MISC_VEC_ID;
	 * Don't configure the MSI-X register if no mapping has been done
	 * between interrupt vectors and event fds;
	 * but if MSI-X has already been enabled, we still need to configure
	 * auto clean, auto mask and throttling.
	gpie = rd32(hw, NGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
			!(gpie & NGBE_GPIE_MSIX))
	if (rte_intr_allow_others(intr_handle)) {
		base = NGBE_RX_VEC_START;
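		/* keep the first vector for the misc cause; queue vectors start at NGBE_RX_VEC_START */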
	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	wr32(hw, NGBE_GPIE, gpie);
	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			/* by default, 1:1 mapping */
			ngbe_set_ivar_map(hw, 0, queue_id, vec);
			rte_intr_vec_list_index_set(intr_handle,
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
	ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
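	/* program the default throttling interval into the misc vector's ITR register */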
	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
static const struct eth_dev_ops ngbe_eth_dev_ops = {
	.dev_configure = ngbe_dev_configure,
	.dev_infos_get = ngbe_dev_info_get,
	.dev_start = ngbe_dev_start,
	.dev_stop = ngbe_dev_stop,
	.dev_close = ngbe_dev_close,
	.dev_reset = ngbe_dev_reset,
	.link_update = ngbe_dev_link_update,
	.dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get,
	.rx_queue_start = ngbe_dev_rx_queue_start,
	.rx_queue_stop = ngbe_dev_rx_queue_stop,
	.tx_queue_start = ngbe_dev_tx_queue_start,
	.tx_queue_stop = ngbe_dev_tx_queue_stop,
	.rx_queue_setup = ngbe_dev_rx_queue_setup,
	.rx_queue_release = ngbe_dev_rx_queue_release,
	.tx_queue_setup = ngbe_dev_tx_queue_setup,
	.tx_queue_release = ngbe_dev_tx_queue_release,
	.rx_burst_mode_get = ngbe_rx_burst_mode_get,
	.tx_burst_mode_get = ngbe_tx_burst_mode_get,
RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);