1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_interrupts.h>
14 #include <rte_memory.h>
16 #include <rte_alarm.h>
18 #include "txgbe_logs.h"
19 #include "base/txgbe.h"
20 #include "txgbe_ethdev.h"
21 #include "txgbe_rxtx.h"
23 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
24 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
25 static int txgbe_dev_close(struct rte_eth_dev *dev);
26 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
27 int wait_to_complete);
29 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
30 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
31 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
32 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
33 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
34 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
35 struct rte_intr_handle *handle);
36 static void txgbe_dev_interrupt_handler(void *param);
37 static void txgbe_dev_interrupt_delayed_handler(void *param);
38 static void txgbe_configure_msix(struct rte_eth_dev *dev);
41 * The set of PCI devices this driver supports
/* PCI ID table of supported Wangxun devices; the zeroed entry is the
 * terminating sentinel required by the PCI bus scan.
 */
43 static const struct rte_pci_id pci_id_txgbe_map[] = {
44 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
45 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
46 { .vendor_id = 0, /* sentinel */ },
/* Rx descriptor ring limits reported through dev_info_get(). */
49 static const struct rte_eth_desc_lim rx_desc_lim = {
50 .nb_max = TXGBE_RING_DESC_MAX,
51 .nb_min = TXGBE_RING_DESC_MIN,
52 .nb_align = TXGBE_RXD_ALIGN,
/* Tx descriptor ring limits, including per-packet segment caps,
 * reported through dev_info_get().
 */
55 static const struct rte_eth_desc_lim tx_desc_lim = {
56 .nb_max = TXGBE_RING_DESC_MAX,
57 .nb_min = TXGBE_RING_DESC_MIN,
58 .nb_align = TXGBE_TXD_ALIGN,
59 .nb_seg_max = TXGBE_TX_MAX_SEG,
60 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
63 static const struct eth_dev_ops txgbe_eth_dev_ops;
/* Tell whether the attached PHY is an SFP module.
 * NOTE(review): the return statements fall outside this view; presumably
 * the listed SFP phy types return true and everything else false — confirm.
 */
66 txgbe_is_sfp(struct txgbe_hw *hw)
68 switch (hw->phy.type) {
69 case txgbe_phy_sfp_avago:
70 case txgbe_phy_sfp_ftl:
71 case txgbe_phy_sfp_intel:
72 case txgbe_phy_sfp_unknown:
73 case txgbe_phy_sfp_tyco_passive:
74 case txgbe_phy_sfp_unknown_passive:
/* Unmask device interrupts: program the misc-cause enable register with the
 * currently tracked mask and clear (enable) both queue interrupt mask banks.
 */
82 txgbe_enable_intr(struct rte_eth_dev *dev)
84 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
85 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
87 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
88 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
89 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
/* Mask all device interrupts: disable every misc cause and set (mask)
 * both queue interrupt mask banks.
 */
94 txgbe_disable_intr(struct txgbe_hw *hw)
96 PMD_INIT_FUNC_TRACE();
98 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
99 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
100 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
/*
 * Per-port init hook invoked via rte_eth_dev_create(): wires up dev_ops,
 * maps BAR0 into the hw handle, reserves a DMA zone for the interrupt
 * status block, runs base-code/EEPROM/MAC init, allocates the unicast and
 * hash MAC address tables, and registers + enables the PCI interrupt.
 * Fix: repair mis-encoded "&eth_dev" (was the HTML entity residue
 * "ð_dev") in the permanent-MAC copy.
 */
105 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
107 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
108 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
109 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
110 const struct rte_memzone *mz;
114 PMD_INIT_FUNC_TRACE();
116 eth_dev->dev_ops = &txgbe_eth_dev_ops;
118 rte_eth_copy_pci_info(eth_dev, pci_dev);
120 /* Vendor and Device ID need to be set before init of shared code */
121 hw->device_id = pci_dev->id.device_id;
122 hw->vendor_id = pci_dev->id.vendor_id;
123 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
124 hw->allow_unsupported_sfp = 1;
126 /* Reserve memory for interrupt status block */
127 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
128 16, TXGBE_ALIGN, SOCKET_ID_ANY);
132 hw->isb_dma = TMZ_PADDR(mz);
133 hw->isb_mem = TMZ_VADDR(mz);
135 /* Initialize the shared code (base driver) */
136 err = txgbe_init_shared_code(hw);
138 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
142 err = hw->rom.init_params(hw);
144 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
148 /* Make sure we have a good EEPROM before we read from it */
149 err = hw->rom.validate_checksum(hw, &csum);
151 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
155 err = hw->mac.init_hw(hw);
158 * Devices with copper phys will fail to initialise if txgbe_init_hw()
159 * is called too soon after the kernel driver unbinding/binding occurs.
160 * The failure occurs in txgbe_identify_phy() for all devices,
161 * but for non-copper devices, txgbe_identify_sfp_module() is
162 * also called. See txgbe_identify_phy(). The reason for the
163 * failure is not known, and only occurs when virtualisation features
164 * are disabled in the bios. A delay of 200ms was found to be enough by
165 * trial-and-error, and is doubled to be safe.
167 if (err && hw->phy.media_type == txgbe_media_type_copper) {
169 err = hw->mac.init_hw(hw);
172 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
175 if (err == TXGBE_ERR_EEPROM_VERSION) {
176 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
177 "LOM. Please be aware there may be issues associated "
178 "with your hardware.");
179 PMD_INIT_LOG(ERR, "If you are experiencing problems "
180 "please contact your hardware representative "
181 "who provided you with this hardware.");
182 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
183 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
186 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
190 /* disable interrupt */
191 txgbe_disable_intr(hw);
193 /* Allocate memory for storing MAC addresses */
194 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
195 hw->mac.num_rar_entries, 0);
196 if (eth_dev->data->mac_addrs == NULL) {
198 "Failed to allocate %u bytes needed to store "
200 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
204 /* Copy the permanent MAC address */
205 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
206 &eth_dev->data->mac_addrs[0]);
208 /* Allocate memory for storing hash filter MAC addresses */
209 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
210 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
211 if (eth_dev->data->hash_mac_addrs == NULL) {
213 "Failed to allocate %d bytes needed to store MAC addresses",
214 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
218 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
219 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
220 (int)hw->mac.type, (int)hw->phy.type,
221 (int)hw->phy.sfp_type);
223 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
224 (int)hw->mac.type, (int)hw->phy.type);
226 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
227 eth_dev->data->port_id, pci_dev->id.vendor_id,
228 pci_dev->id.device_id);
230 rte_intr_callback_register(intr_handle,
231 txgbe_dev_interrupt_handler, eth_dev);
233 /* enable uio/vfio intr/eventfd mapping */
234 rte_intr_enable(intr_handle);
236 /* enable support intr */
237 txgbe_enable_intr(eth_dev);
/* Per-port uninit hook: only the primary process tears down; delegates
 * all cleanup to txgbe_dev_close().
 */
243 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
245 PMD_INIT_FUNC_TRACE();
247 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
250 txgbe_dev_close(eth_dev);
/*
 * PCI probe callback: parse optional devargs, create the PF ethdev with a
 * txgbe_adapter private area, then look the PF device up by name.
 * Fix: repair mis-encoded "&eth_da" (was the HTML entity residue
 * "ð_da") in the memset that zeroes the devargs struct.
 */
256 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
257 struct rte_pci_device *pci_dev)
259 struct rte_eth_dev *pf_ethdev;
260 struct rte_eth_devargs eth_da;
263 if (pci_dev->device.devargs) {
264 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
269 memset(&eth_da, 0, sizeof(eth_da));
272 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
273 sizeof(struct txgbe_adapter),
274 eth_dev_pci_specific_init, pci_dev,
275 eth_txgbe_dev_init, NULL);
277 if (retval || eth_da.nb_representor_ports < 1)
280 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
281 if (pf_ethdev == NULL)
/* PCI remove callback: find the ethdev allocated for this PCI device by
 * name and destroy it through the uninit hook.
 */
287 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
289 struct rte_eth_dev *ethdev;
291 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
295 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
/* Driver descriptor registered with the PCI bus: requires BAR mapping and
 * supports link-status-change interrupts.
 */
298 static struct rte_pci_driver rte_txgbe_pmd = {
299 .id_table = pci_id_txgbe_map,
300 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
301 RTE_PCI_DRV_INTR_LSC,
302 .probe = eth_txgbe_pci_probe,
303 .remove = eth_txgbe_pci_remove,
/* Validate the per-VF RSS Rx queue count and derive the SRIOV pool layout:
 * picks the active pool count (64 or 32 pools; selection logic between
 * these lines is outside this view), then computes queues-per-pool and the
 * first queue index reserved for the PF.
 */
307 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
309 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
314 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
317 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
323 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
324 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
325 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
326 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
/* Validate the configured Rx/Tx multi-queue modes against device limits.
 * With SRIOV active only VMDq-style modes are allowed and queue counts are
 * bounded by the per-pool budget; without SRIOV, VMDq+DCB and plain DCB
 * configurations are checked for legal pool/TC counts. Error paths return
 * negative values (returns are partly outside this view).
 */
331 txgbe_check_mq_mode(struct rte_eth_dev *dev)
333 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
334 uint16_t nb_rx_q = dev->data->nb_rx_queues;
335 uint16_t nb_tx_q = dev->data->nb_tx_queues;
337 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
338 /* check multi-queue mode */
339 switch (dev_conf->rxmode.mq_mode) {
340 case ETH_MQ_RX_VMDQ_DCB:
341 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
343 case ETH_MQ_RX_VMDQ_DCB_RSS:
344 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
345 PMD_INIT_LOG(ERR, "SRIOV active,"
346 " unsupported mq_mode rx %d.",
347 dev_conf->rxmode.mq_mode);
350 case ETH_MQ_RX_VMDQ_RSS:
351 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
352 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
353 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
354 PMD_INIT_LOG(ERR, "SRIOV is active,"
355 " invalid queue number"
356 " for VMDQ RSS, allowed"
357 " value are 1, 2 or 4.");
361 case ETH_MQ_RX_VMDQ_ONLY:
363 /* if nothing mq mode configure, use default scheme */
364 dev->data->dev_conf.rxmode.mq_mode =
367 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
368 /* SRIOV only works in VMDq enable mode */
369 PMD_INIT_LOG(ERR, "SRIOV is active,"
370 " wrong mq_mode rx %d.",
371 dev_conf->rxmode.mq_mode);
375 switch (dev_conf->txmode.mq_mode) {
376 case ETH_MQ_TX_VMDQ_DCB:
377 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
378 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
380 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
381 dev->data->dev_conf.txmode.mq_mode =
386 /* check valid queue number */
387 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
388 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
389 PMD_INIT_LOG(ERR, "SRIOV is active,"
390 " nb_rx_q=%d nb_tx_q=%d queue number"
391 " must be less than or equal to %d.",
393 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
397 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
398 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
402 /* check configuration for vmdb+dcb mode */
403 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
404 const struct rte_eth_vmdq_dcb_conf *conf;
406 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
407 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
408 TXGBE_VMDQ_DCB_NB_QUEUES);
411 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
412 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
413 conf->nb_queue_pools == ETH_32_POOLS)) {
414 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
415 " nb_queue_pools must be %d or %d.",
416 ETH_16_POOLS, ETH_32_POOLS);
420 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
421 const struct rte_eth_vmdq_dcb_tx_conf *conf;
423 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
424 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
425 TXGBE_VMDQ_DCB_NB_QUEUES);
428 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
429 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
430 conf->nb_queue_pools == ETH_32_POOLS)) {
431 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
432 " nb_queue_pools != %d and"
433 " nb_queue_pools != %d.",
434 ETH_16_POOLS, ETH_32_POOLS);
439 /* For DCB mode check our configuration before we go further */
440 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
441 const struct rte_eth_dcb_rx_conf *conf;
443 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
444 if (!(conf->nb_tcs == ETH_4_TCS ||
445 conf->nb_tcs == ETH_8_TCS)) {
446 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
447 " and nb_tcs != %d.",
448 ETH_4_TCS, ETH_8_TCS);
453 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
454 const struct rte_eth_dcb_tx_conf *conf;
456 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
457 if (!(conf->nb_tcs == ETH_4_TCS ||
458 conf->nb_tcs == ETH_8_TCS)) {
459 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
460 " and nb_tcs != %d.",
461 ETH_4_TCS, ETH_8_TCS);
/* dev_configure op: force RSS-hash offload when RSS is requested, validate
 * the multi-queue configuration, request a deferred link update, and reset
 * the bulk-alloc hint (individual Rx queue setup may clear it later).
 */
470 txgbe_dev_configure(struct rte_eth_dev *dev)
472 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
473 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
476 PMD_INIT_FUNC_TRACE();
478 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
479 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
481 /* multiple queue mode checking */
482 ret = txgbe_check_mq_mode(dev);
484 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
489 /* set flag to update link status after init */
490 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
493 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
494 * allocation Rx preconditions we will reset it.
496 adapter->rx_bulk_alloc_allowed = true;
/* Enable the GPIO line used for PHY/SFP events (bit 6) and add the GPIO
 * cause to the tracked misc interrupt mask.
 */
502 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
504 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
505 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
508 gpie = rd32(hw, TXGBE_GPIOINTEN);
509 gpie |= TXGBE_GPIOBIT_6;
510 wr32(hw, TXGBE_GPIOINTEN, gpie);
511 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
515 * Set device link up: enable tx.
/* dev_set_link_up op: power the copper PHY, or enable the Tx laser for
 * fiber media and refresh the cached link status.
 */
518 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
520 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
522 if (hw->phy.media_type == txgbe_media_type_copper) {
523 /* Turn on the copper */
524 hw->phy.set_phy_power(hw, true);
526 /* Turn on the laser */
527 hw->mac.enable_tx_laser(hw);
528 txgbe_dev_link_update(dev, 0);
535 * Set device link down: disable tx.
/* dev_set_link_down op: mirror of set_link_up — power down the copper PHY,
 * or disable the Tx laser for fiber and refresh the cached link status.
 */
538 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
540 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
542 if (hw->phy.media_type == txgbe_media_type_copper) {
543 /* Turn off the copper */
544 hw->phy.set_phy_power(hw, false);
546 /* Turn off the laser */
547 hw->mac.disable_tx_laser(hw);
548 txgbe_dev_link_update(dev, 0);
555 * Reset and stop device.
/* dev_close op: disable the uio/vfio interrupt, unregister the interrupt
 * callback with a bounded retry loop (-EAGAIN means a handler is still
 * running), cancel any pending delayed-handler alarm, and free the MAC
 * address tables.
 */
558 txgbe_dev_close(struct rte_eth_dev *dev)
560 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
561 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
565 PMD_INIT_FUNC_TRACE();
567 /* disable uio intr before callback unregister */
568 rte_intr_disable(intr_handle);
571 ret = rte_intr_callback_unregister(intr_handle,
572 txgbe_dev_interrupt_handler, dev);
573 if (ret >= 0 || ret == -ENOENT) {
575 } else if (ret != -EAGAIN) {
577 "intr callback unregister failed: %d",
581 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
583 /* cancel the delay handler before remove dev */
584 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
586 rte_free(dev->data->mac_addrs);
587 dev->data->mac_addrs = NULL;
589 rte_free(dev->data->hash_mac_addrs);
590 dev->data->hash_mac_addrs = NULL;
/* dev_infos_get op: report queue/MAC/VMDq limits, offload capabilities,
 * default ring thresholds, descriptor limits, RSS parameters, supported
 * link speeds, and driver-preferred Rx/Tx defaults.
 */
596 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
598 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
599 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
601 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
602 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
603 dev_info->min_rx_bufsize = 1024;
604 dev_info->max_rx_pktlen = 15872;
605 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
606 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
607 dev_info->max_vfs = pci_dev->max_vfs;
608 dev_info->max_vmdq_pools = ETH_64_POOLS;
609 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
610 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
611 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
612 dev_info->rx_queue_offload_capa);
613 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
614 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
616 dev_info->default_rxconf = (struct rte_eth_rxconf) {
618 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
619 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
620 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
622 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
627 dev_info->default_txconf = (struct rte_eth_txconf) {
629 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
630 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
631 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
633 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
637 dev_info->rx_desc_lim = rx_desc_lim;
638 dev_info->tx_desc_lim = tx_desc_lim;
640 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
641 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
642 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
644 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
645 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
647 /* Driver-preferred Rx/Tx parameters */
648 dev_info->default_rxportconf.burst_size = 32;
649 dev_info->default_txportconf.burst_size = 32;
650 dev_info->default_rxportconf.nb_queues = 1;
651 dev_info->default_txportconf.nb_queues = 1;
652 dev_info->default_rxportconf.ring_size = 256;
653 dev_info->default_txportconf.ring_size = 256;
/* EAL alarm callback: finish deferred link setup. Queries advertised/
 * capable speeds, programs the MAC link, and clears the NEED_LINK_CONFIG
 * flag so link_update resumes normal polling.
 */
659 txgbe_dev_setup_link_alarm_handler(void *param)
661 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
662 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
663 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
665 bool autoneg = false;
667 speed = hw->phy.autoneg_advertised;
669 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
671 hw->mac.setup_link(hw, speed, true);
673 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
676 /* return 0 means link status changed, -1 means not changed */
/* Core link-update logic: reads the MAC link state (optionally waiting),
 * schedules deferred link setup via an EAL alarm for fiber ports that are
 * down, and maps the hardware speed code to an rte_eth_link record which
 * is published with rte_eth_linkstatus_set().
 */
678 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
679 int wait_to_complete)
681 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
682 struct rte_eth_link link;
683 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
684 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
689 memset(&link, 0, sizeof(link));
690 link.link_status = ETH_LINK_DOWN;
691 link.link_speed = ETH_SPEED_NUM_NONE;
692 link.link_duplex = ETH_LINK_HALF_DUPLEX;
693 link.link_autoneg = ETH_LINK_AUTONEG;
695 hw->mac.get_link_status = true;
/* While deferred link configuration is pending, report link down. */
697 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
698 return rte_eth_linkstatus_set(dev, &link);
700 /* check if it needs to wait to complete, if lsc interrupt is enabled */
701 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
704 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
707 link.link_speed = ETH_SPEED_NUM_100M;
708 link.link_duplex = ETH_LINK_FULL_DUPLEX;
709 return rte_eth_linkstatus_set(dev, &link);
/* Fiber port with link down: defer link setup to an alarm (10 us). */
713 if (hw->phy.media_type == txgbe_media_type_fiber) {
714 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
715 rte_eal_alarm_set(10,
716 txgbe_dev_setup_link_alarm_handler, dev);
718 return rte_eth_linkstatus_set(dev, &link);
721 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
722 link.link_status = ETH_LINK_UP;
723 link.link_duplex = ETH_LINK_FULL_DUPLEX;
/* Translate the hardware speed code to an ethdev speed value. */
725 switch (link_speed) {
727 case TXGBE_LINK_SPEED_UNKNOWN:
728 link.link_duplex = ETH_LINK_FULL_DUPLEX;
729 link.link_speed = ETH_SPEED_NUM_100M;
732 case TXGBE_LINK_SPEED_100M_FULL:
733 link.link_speed = ETH_SPEED_NUM_100M;
736 case TXGBE_LINK_SPEED_1GB_FULL:
737 link.link_speed = ETH_SPEED_NUM_1G;
740 case TXGBE_LINK_SPEED_2_5GB_FULL:
741 link.link_speed = ETH_SPEED_NUM_2_5G;
744 case TXGBE_LINK_SPEED_5GB_FULL:
745 link.link_speed = ETH_SPEED_NUM_5G;
748 case TXGBE_LINK_SPEED_10GB_FULL:
749 link.link_speed = ETH_SPEED_NUM_10G;
753 return rte_eth_linkstatus_set(dev, &link);
/* link_update op: thin wrapper over the shared implementation. */
757 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
759 return txgbe_dev_link_update_share(dev, wait_to_complete);
763 * It clears the interrupt causes and enables the interrupt.
764 * It will be called once only during nic initialized.
767 * Pointer to struct rte_eth_dev.
772 * - On success, zero.
773 * - On failure, a negative value.
/* Enable (on != 0) or disable the link-status-change cause in the tracked
 * misc mask; prints current link state first when enabling.
 */
776 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
778 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
780 txgbe_dev_link_status_print(dev);
782 intr->mask_misc |= TXGBE_ICRMISC_LSC;
784 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
790 * It clears the interrupt causes and enables the interrupt.
791 * It will be called once only during nic initialized.
794 * Pointer to struct rte_eth_dev.
797 * - On success, zero.
798 * - On failure, a negative value.
/* Enable all Rx queue interrupt causes in both tracked mask banks. */
801 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
803 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
805 intr->mask[0] |= TXGBE_ICR_MASK;
806 intr->mask[1] |= TXGBE_ICR_MASK;
812 * It clears the interrupt causes and enables the interrupt.
813 * It will be called once only during nic initialized.
816 * Pointer to struct rte_eth_dev.
819 * - On success, zero.
820 * - On failure, a negative value.
/* Add the link-security (MACsec) cause to the tracked misc mask. */
823 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
825 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
827 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
833 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
836 * Pointer to struct rte_eth_dev.
839 * - On success, zero.
840 * - On failure, a negative value.
/* Mask all interrupts, read the misc cause word from the DMA-mapped
 * interrupt status block, and translate each asserted cause (LSC, VF
 * mailbox, MACsec, GPIO/PHY) into a software flag for later action.
 */
843 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
846 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
847 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
849 /* clear all cause mask */
850 txgbe_disable_intr(hw);
852 /* read-on-clear nic registers here */
853 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
854 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
858 /* set flag for async link update */
859 if (eicr & TXGBE_ICRMISC_LSC)
860 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
862 if (eicr & TXGBE_ICRMISC_VFMBX)
863 intr->flags |= TXGBE_FLAG_MAILBOX;
865 if (eicr & TXGBE_ICRMISC_LNKSEC)
866 intr->flags |= TXGBE_FLAG_MACSEC;
868 if (eicr & TXGBE_ICRMISC_GPIO)
869 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
875 * It gets and then prints the link status.
878 * Pointer to struct rte_eth_dev.
881 * - On success, zero.
882 * - On failure, a negative value.
/* Log the cached link status (speed/duplex when up) plus the PCI address
 * for this port.
 */
885 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
887 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
888 struct rte_eth_link link;
890 rte_eth_linkstatus_get(dev, &link);
892 if (link.link_status) {
893 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
894 (int)(dev->data->port_id),
895 (unsigned int)link.link_speed,
896 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
897 "full-duplex" : "half-duplex");
899 PMD_INIT_LOG(INFO, " Port %d: Link Down",
900 (int)(dev->data->port_id));
902 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
903 pci_dev->addr.domain,
906 pci_dev->addr.function);
910 * It executes link_update after knowing an interrupt occurred.
913 * Pointer to struct rte_eth_dev.
916 * - On success, zero.
917 * - On failure, a negative value.
/* Act on the flags set by interrupt_get_status(): service PHY/LASI events,
 * run a link update, and — because link state is unstable right after a
 * transition — schedule the delayed handler (1 s after link-up, 4 s after
 * link-down) with LSC temporarily masked; then re-enable interrupts.
 */
920 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
921 struct rte_intr_handle *intr_handle)
923 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
925 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
927 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
929 if (intr->flags & TXGBE_FLAG_MAILBOX)
930 intr->flags &= ~TXGBE_FLAG_MAILBOX;
932 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
933 hw->phy.handle_lasi(hw);
934 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
937 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
938 struct rte_eth_link link;
940 /*get the link status before link update, for predicting later*/
941 rte_eth_linkstatus_get(dev, &link);
943 txgbe_dev_link_update(dev, 0);
946 if (!link.link_status)
947 /* handle it 1 sec later, wait it being stable */
948 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
951 /* handle it 4 sec later, wait it being stable */
952 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
954 txgbe_dev_link_status_print(dev);
955 if (rte_eal_alarm_set(timeout * 1000,
956 txgbe_dev_interrupt_delayed_handler,
958 PMD_DRV_LOG(ERR, "Error setting alarm");
960 /* remember original mask */
961 intr->mask_misc_orig = intr->mask_misc;
962 /* only disable lsc interrupt */
963 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
967 PMD_DRV_LOG(DEBUG, "enable intr immediately");
968 txgbe_enable_intr(dev);
969 rte_intr_enable(intr_handle);
975 * Interrupt handler which shall be registered for alarm callback for delayed
976 * handling specific interrupt to wait for the stable nic state. As the
977 * NIC interrupt state is not stable for txgbe after link is just down,
978 * it needs to wait 4 seconds to get the stable status.
981 * Pointer to interrupt handle.
983 * The address of parameter (struct rte_eth_dev *) registered before.
/* Alarm callback scheduled by interrupt_action(): re-reads the misc cause
 * word, services any pending PHY/link/MACsec work, notifies applications
 * via ethdev callbacks, restores the saved misc mask, and re-enables
 * interrupts.
 */
989 txgbe_dev_interrupt_delayed_handler(void *param)
991 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
992 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
993 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
994 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
995 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
998 txgbe_disable_intr(hw);
1000 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
1002 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
1003 hw->phy.handle_lasi(hw);
1004 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
1007 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
1008 txgbe_dev_link_update(dev, 0);
1009 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
1010 txgbe_dev_link_status_print(dev);
1011 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
1015 if (intr->flags & TXGBE_FLAG_MACSEC) {
1016 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
1018 intr->flags &= ~TXGBE_FLAG_MACSEC;
1021 /* restore original mask */
1022 intr->mask_misc = intr->mask_misc_orig;
1023 intr->mask_misc_orig = 0;
1025 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
1026 txgbe_enable_intr(dev);
1027 rte_intr_enable(intr_handle);
1031 * Interrupt handler triggered by NIC for handling
1032 * specific interrupt.
1035 * Pointer to interrupt handle.
1037 * The address of parameter (struct rte_eth_dev *) registered before.
/* Top-level ISR registered with the EAL: snapshot causes, then act. */
1043 txgbe_dev_interrupt_handler(void *param)
1045 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1047 txgbe_dev_interrupt_get_status(dev);
1048 txgbe_dev_interrupt_action(dev, dev->intr_handle);
1052 * set the IVAR registers, mapping interrupt causes to vectors
1054 * pointer to txgbe_hw struct
1056 * 0 for Rx, 1 for Tx, -1 for other causes
1058 * queue to map the corresponding interrupt to
1060 * the vector to map to the corresponding queue
/* Each IVAR register packs four 8-bit cause->vector entries; this computes
 * the byte offset for the (queue, direction) pair and read-modify-writes
 * only that entry. direction == -1 targets the misc-cause IVAR instead.
 */
1063 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
1064 uint8_t queue, uint8_t msix_vector)
1068 if (direction == -1) {
1070 msix_vector |= TXGBE_IVARMISC_VLD;
1072 tmp = rd32(hw, TXGBE_IVARMISC);
1073 tmp &= ~(0xFF << idx);
1074 tmp |= (msix_vector << idx);
1075 wr32(hw, TXGBE_IVARMISC, tmp);
1077 /* rx or tx causes */
1078 /* Workround for ICR lost */
1079 idx = ((16 * (queue & 1)) + (8 * direction));
1080 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
1081 tmp &= ~(0xFF << idx);
1082 tmp |= (msix_vector << idx);
1083 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
1088 * Sets up the hardware to properly generate MSI-X interrupts
1090 * board private structure
/* Enable MSI-X in GPIE, then populate the IVAR table: one vector per Rx
 * queue (1:1 up to nb_efd, then the last vector is shared), the misc
 * causes on TXGBE_MISC_VEC_ID, and a default throttle interval for the
 * misc vector.
 */
1093 txgbe_configure_msix(struct rte_eth_dev *dev)
1095 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1096 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1097 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1098 uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
1099 uint32_t vec = TXGBE_MISC_VEC_ID;
1102 /* won't configure msix register if no mapping is done
1103 * between intr vector and event fd
1104 * but if misx has been enabled already, need to configure
1105 * auto clean, auto mask and throttling.
1107 gpie = rd32(hw, TXGBE_GPIE);
1108 if (!rte_intr_dp_is_en(intr_handle) &&
1109 !(gpie & TXGBE_GPIE_MSIX))
1112 if (rte_intr_allow_others(intr_handle)) {
1113 base = TXGBE_RX_VEC_START;
1117 /* setup GPIE for MSI-x mode */
1118 gpie = rd32(hw, TXGBE_GPIE);
1119 gpie |= TXGBE_GPIE_MSIX;
1120 wr32(hw, TXGBE_GPIE, gpie);
1122 /* Populate the IVAR table and set the ITR values to the
1123 * corresponding register.
1125 if (rte_intr_dp_is_en(intr_handle)) {
1126 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
1128 /* by default, 1:1 mapping */
1129 txgbe_set_ivar_map(hw, 0, queue_id, vec);
1130 intr_handle->intr_vec[queue_id] = vec;
1131 if (vec < base + intr_handle->nb_efd - 1)
1135 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1137 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
1138 TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
/* ethdev ops table referenced by eth_txgbe_dev_init(); additional ops may
 * exist outside this view.
 */
1142 static const struct eth_dev_ops txgbe_eth_dev_ops = {
1143 .dev_configure = txgbe_dev_configure,
1144 .dev_infos_get = txgbe_dev_info_get,
1145 .dev_set_link_up = txgbe_dev_set_link_up,
1146 .dev_set_link_down = txgbe_dev_set_link_down,
/* Register the PMD with the PCI bus, export its device ID table and kernel
 * module dependencies, and declare per-component log types (Rx/Tx debug
 * log types only when the corresponding build options are enabled).
 */
1149 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
1150 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
1151 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
1153 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
1154 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
1156 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
1157 RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
1159 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
1160 RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
1163 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
1164 RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);