1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_interrupts.h>
14 #include <rte_debug.h>
16 #include <rte_memory.h>
18 #include <rte_alarm.h>
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
25 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
26 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
27 static int txgbe_dev_close(struct rte_eth_dev *dev);
28 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
29 int wait_to_complete);
31 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
32 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
33 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
34 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
35 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
36 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
37 struct rte_intr_handle *handle);
38 static void txgbe_dev_interrupt_handler(void *param);
39 static void txgbe_dev_interrupt_delayed_handler(void *param);
40 static void txgbe_configure_msix(struct rte_eth_dev *dev);
/* PCI vendor/device ID pairs claimed by this PMD; zero vendor_id terminates. */
43 * The set of PCI devices this driver supports
45 static const struct rte_pci_id pci_id_txgbe_map[] = {
46 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
47 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
48 { .vendor_id = 0, /* sentinel */ },
/* Rx descriptor ring limits advertised through dev_info:
 * maximum/minimum ring size and required descriptor-count alignment.
 */
51 static const struct rte_eth_desc_lim rx_desc_lim = {
52 .nb_max = TXGBE_RING_DESC_MAX,
53 .nb_min = TXGBE_RING_DESC_MIN,
54 .nb_align = TXGBE_RXD_ALIGN,
/* Tx descriptor ring limits advertised through dev_info, including the
 * per-packet and per-MTU segment maximums used by tx_pkt_prepare checks.
 */
57 static const struct rte_eth_desc_lim tx_desc_lim = {
58 .nb_max = TXGBE_RING_DESC_MAX,
59 .nb_min = TXGBE_RING_DESC_MIN,
60 .nb_align = TXGBE_TXD_ALIGN,
61 .nb_seg_max = TXGBE_TX_MAX_SEG,
62 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
65 static const struct eth_dev_ops txgbe_eth_dev_ops;
/* Report whether the attached PHY is one of the recognized SFP module
 * types (or an unknown SFP), as opposed to a copper/backplane PHY.
 * NOTE(review): the return statements and default case of this switch are
 * not visible in this chunk — presumably the listed cases return true and
 * everything else false; confirm against the full source.
 */
68 txgbe_is_sfp(struct txgbe_hw *hw)
70 switch (hw->phy.type) {
71 case txgbe_phy_sfp_avago:
72 case txgbe_phy_sfp_ftl:
73 case txgbe_phy_sfp_intel:
74 case txgbe_phy_sfp_unknown:
75 case txgbe_phy_sfp_tyco_passive:
76 case txgbe_phy_sfp_unknown_passive:
/* Reset the PF MAC through the base driver, then set the PORTCTL
 * "reset done" bit so PF/VF mailbox operations can work afterwards.
 * NOTE(review): local declarations and the return path are not visible in
 * this chunk; the SFP-not-present status is apparently tolerated
 * (presumably remapped to success) — confirm against the full source.
 */
84 txgbe_pf_reset_hw(struct txgbe_hw *hw)
89 status = hw->mac.reset_hw(hw);
91 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
92 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
93 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
94 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
97 if (status == TXGBE_ERR_SFP_NOT_PRESENT)
/* Re-enable interrupts: program the currently allowed misc causes from
 * intr->mask_misc, and clear (unmask) both queue interrupt-vector
 * mask registers via the IMC (interrupt mask clear) registers.
 */
103 txgbe_enable_intr(struct rte_eth_dev *dev)
105 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
106 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
108 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
109 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
110 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
/* Mask all interrupts: disable every misc interrupt cause and set (mask)
 * both queue interrupt vectors via the IMS (interrupt mask set) registers.
 * Counterpart of txgbe_enable_intr().
 */
115 txgbe_disable_intr(struct txgbe_hw *hw)
117 PMD_INIT_FUNC_TRACE();
119 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
120 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
121 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
/* Per-device init callback invoked from rte_eth_dev_create():
 * installs dev_ops and burst functions, maps BAR0, reserves the interrupt
 * status block, runs the base-driver EEPROM/MAC init, allocates MAC
 * address tables, and registers/enables the PCI interrupt handler.
 * Secondary processes only (re)select Rx/Tx burst functions and return.
 * NOTE(review): error-handling/return lines between the visible statements
 * are missing from this chunk.
 */
126 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
128 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
129 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
130 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
131 const struct rte_memzone *mz;
135 PMD_INIT_FUNC_TRACE();
137 eth_dev->dev_ops = &txgbe_eth_dev_ops;
138 eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
139 eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
140 eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
143 * For secondary processes, we don't initialise any further as primary
144 * has already done this work. Only check we don't need a different
145 * RX and TX function.
147 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
148 struct txgbe_tx_queue *txq;
149 /* TX queue function in primary, set by last queue initialized
150 * Tx queue may not initialized by primary process
152 if (eth_dev->data->tx_queues) {
153 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
154 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
155 txgbe_set_tx_function(eth_dev, txq);
157 /* Use default TX function if we get here */
158 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
159 "Using default TX function.");
162 txgbe_set_rx_function(eth_dev);
167 rte_eth_copy_pci_info(eth_dev, pci_dev);
169 /* Vendor and Device ID need to be set before init of shared code */
170 hw->device_id = pci_dev->id.device_id;
171 hw->vendor_id = pci_dev->id.vendor_id;
172 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
173 hw->allow_unsupported_sfp = 1;
175 /* Reserve memory for interrupt status block */
176 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
177 16, TXGBE_ALIGN, SOCKET_ID_ANY);
181 hw->isb_dma = TMZ_PADDR(mz);
182 hw->isb_mem = TMZ_VADDR(mz);
184 /* Initialize the shared code (base driver) */
185 err = txgbe_init_shared_code(hw);
187 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
191 err = hw->rom.init_params(hw);
193 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
197 /* Make sure we have a good EEPROM before we read from it */
198 err = hw->rom.validate_checksum(hw, &csum);
200 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
204 err = hw->mac.init_hw(hw);
207 * Devices with copper phys will fail to initialise if txgbe_init_hw()
208 * is called too soon after the kernel driver unbinding/binding occurs.
209 * The failure occurs in txgbe_identify_phy() for all devices,
210 * but for non-copper devices, txgbe_identify_sfp_module() is
211 * also called. See txgbe_identify_phy(). The reason for the
212 * failure is not known, and only occurs when virtualisation features
213 * are disabled in the bios. A delay of 200ms was found to be enough by
214 * trial-and-error, and is doubled to be safe.
216 if (err && hw->phy.media_type == txgbe_media_type_copper) {
218 err = hw->mac.init_hw(hw);
221 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
224 if (err == TXGBE_ERR_EEPROM_VERSION) {
225 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
226 "LOM. Please be aware there may be issues associated "
227 "with your hardware.");
228 PMD_INIT_LOG(ERR, "If you are experiencing problems "
229 "please contact your hardware representative "
230 "who provided you with this hardware.");
231 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
232 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
235 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
239 /* disable interrupt */
240 txgbe_disable_intr(hw);
242 /* Allocate memory for storing MAC addresses */
243 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
244 hw->mac.num_rar_entries, 0);
245 if (eth_dev->data->mac_addrs == NULL) {
247 "Failed to allocate %u bytes needed to store "
249 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
253 /* Copy the permanent MAC address */
254 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
/* NOTE(review): "ð_dev" below looks like mojibake of "&eth_dev"
 * (HTML entity "&eth;" corruption) — must be fixed in the real source.
 */
255 ð_dev->data->mac_addrs[0]);
257 /* Allocate memory for storing hash filter MAC addresses */
258 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
259 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
260 if (eth_dev->data->hash_mac_addrs == NULL) {
262 "Failed to allocate %d bytes needed to store MAC addresses",
263 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
267 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
268 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
269 (int)hw->mac.type, (int)hw->phy.type,
270 (int)hw->phy.sfp_type);
272 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
273 (int)hw->mac.type, (int)hw->phy.type);
275 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
276 eth_dev->data->port_id, pci_dev->id.vendor_id,
277 pci_dev->id.device_id);
279 rte_intr_callback_register(intr_handle,
280 txgbe_dev_interrupt_handler, eth_dev);
282 /* enable uio/vfio intr/eventfd mapping */
283 rte_intr_enable(intr_handle);
285 /* enable support intr */
286 txgbe_enable_intr(eth_dev);
/* Per-device teardown callback: a no-op for secondary processes
 * (early return not visible here); for the primary process, delegates
 * all cleanup to txgbe_dev_close().
 */
292 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
294 PMD_INIT_FUNC_TRACE();
296 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
299 txgbe_dev_close(eth_dev);
/* PCI probe callback: parse optional devargs, create the PF ethdev with
 * a txgbe_adapter private area, and initialize it via eth_txgbe_dev_init.
 * NOTE(review): "ð_da" below looks like mojibake of "&eth_da" — must be
 * fixed in the real source. Also, the memset of eth_da appears after the
 * devargs parse in the visible lines; verify ordering in the full source.
 */
305 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
306 struct rte_pci_device *pci_dev)
308 struct rte_eth_dev *pf_ethdev;
309 struct rte_eth_devargs eth_da;
312 if (pci_dev->device.devargs) {
313 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
318 memset(ð_da, 0, sizeof(eth_da));
321 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
322 sizeof(struct txgbe_adapter),
323 eth_dev_pci_specific_init, pci_dev,
324 eth_txgbe_dev_init, NULL);
326 if (retval || eth_da.nb_representor_ports < 1)
329 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
330 if (pf_ethdev == NULL)
/* PCI remove callback: look up the ethdev allocated for this PCI device
 * and destroy it, running eth_txgbe_dev_uninit for cleanup.
 */
336 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
338 struct rte_eth_dev *ethdev;
340 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
344 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
/* PCI driver registration record: needs BAR mapping and supports
 * link-status-change interrupts.
 */
347 static struct rte_pci_driver rte_txgbe_pmd = {
348 .id_table = pci_id_txgbe_map,
349 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
350 RTE_PCI_DRV_INTR_LSC,
351 .probe = eth_txgbe_pci_probe,
352 .remove = eth_txgbe_pci_remove,
/* Validate the per-VF RSS Rx queue count and derive the SR-IOV pool
 * layout: sets the active pool count (64 or 32 pools), the number of
 * queues per pool, and the PF's default queue index (placed after all
 * VF queues).
 * NOTE(review): the switch/conditions selecting between ETH_64_POOLS and
 * ETH_32_POOLS are not visible in this chunk — presumably keyed on
 * nb_rx_q being 1/2 vs 4; confirm against the full source.
 */
356 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
358 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
363 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
366 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
372 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
373 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
374 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
375 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
/* Validate the configured Rx/Tx multi-queue modes against the device's
 * capabilities and the current SR-IOV state:
 *  - with SR-IOV active, only VMDq-based modes are allowed and queue
 *    counts must fit within the per-pool budget;
 *  - without SR-IOV, VMDq+DCB and DCB configurations are checked for
 *    legal pool counts (16/32) and TC counts (4/8).
 * NOTE(review): error-return statements between the visible log lines are
 * missing from this chunk.
 */
380 txgbe_check_mq_mode(struct rte_eth_dev *dev)
382 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
383 uint16_t nb_rx_q = dev->data->nb_rx_queues;
384 uint16_t nb_tx_q = dev->data->nb_tx_queues;
386 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
387 /* check multi-queue mode */
388 switch (dev_conf->rxmode.mq_mode) {
389 case ETH_MQ_RX_VMDQ_DCB:
390 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
392 case ETH_MQ_RX_VMDQ_DCB_RSS:
393 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
394 PMD_INIT_LOG(ERR, "SRIOV active,"
395 " unsupported mq_mode rx %d.",
396 dev_conf->rxmode.mq_mode);
399 case ETH_MQ_RX_VMDQ_RSS:
400 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
401 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
402 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
403 PMD_INIT_LOG(ERR, "SRIOV is active,"
404 " invalid queue number"
405 " for VMDQ RSS, allowed"
406 " value are 1, 2 or 4.");
410 case ETH_MQ_RX_VMDQ_ONLY:
412 /* if nothing mq mode configure, use default scheme */
413 dev->data->dev_conf.rxmode.mq_mode =
416 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
417 /* SRIOV only works in VMDq enable mode */
418 PMD_INIT_LOG(ERR, "SRIOV is active,"
419 " wrong mq_mode rx %d.",
420 dev_conf->rxmode.mq_mode);
424 switch (dev_conf->txmode.mq_mode) {
425 case ETH_MQ_TX_VMDQ_DCB:
426 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
427 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
429 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
430 dev->data->dev_conf.txmode.mq_mode =
435 /* check valid queue number */
436 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
437 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
438 PMD_INIT_LOG(ERR, "SRIOV is active,"
439 " nb_rx_q=%d nb_tx_q=%d queue number"
440 " must be less than or equal to %d.",
442 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
446 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
447 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
451 /* check configuration for vmdq+dcb mode */
452 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
453 const struct rte_eth_vmdq_dcb_conf *conf;
455 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
456 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
457 TXGBE_VMDQ_DCB_NB_QUEUES);
460 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
461 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
462 conf->nb_queue_pools == ETH_32_POOLS)) {
463 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
464 " nb_queue_pools must be %d or %d.",
465 ETH_16_POOLS, ETH_32_POOLS);
469 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
470 const struct rte_eth_vmdq_dcb_tx_conf *conf;
472 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
473 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
474 TXGBE_VMDQ_DCB_NB_QUEUES);
477 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
478 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
479 conf->nb_queue_pools == ETH_32_POOLS)) {
480 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
481 " nb_queue_pools != %d and"
482 " nb_queue_pools != %d.",
483 ETH_16_POOLS, ETH_32_POOLS);
488 /* For DCB mode check our configuration before we go further */
489 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
490 const struct rte_eth_dcb_rx_conf *conf;
492 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
493 if (!(conf->nb_tcs == ETH_4_TCS ||
494 conf->nb_tcs == ETH_8_TCS)) {
495 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
496 " and nb_tcs != %d.",
497 ETH_4_TCS, ETH_8_TCS);
502 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
503 const struct rte_eth_dcb_tx_conf *conf;
505 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
506 if (!(conf->nb_tcs == ETH_4_TCS ||
507 conf->nb_tcs == ETH_8_TCS)) {
508 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
509 " and nb_tcs != %d.",
510 ETH_4_TCS, ETH_8_TCS);
/* dev_configure callback: force RSS-hash offload when an RSS mq_mode is
 * requested, validate the multi-queue configuration, flag a deferred link
 * update, and reset the bulk-alloc Rx assumption (re-evaluated per queue
 * at setup time).
 */
519 txgbe_dev_configure(struct rte_eth_dev *dev)
521 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
522 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
525 PMD_INIT_FUNC_TRACE();
527 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
528 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
530 /* multiple queue mode checking */
531 ret = txgbe_check_mq_mode(dev);
533 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
538 /* set flag to update link status after init */
539 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
542 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
543 * allocation Rx preconditions we will reset it.
545 adapter->rx_bulk_alloc_allowed = true;
/* Enable the PHY/module GPIO interrupt (GPIO bit 6) in hardware and add
 * the GPIO cause to the misc interrupt mask so txgbe_enable_intr()
 * unmasks it.
 */
551 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
553 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
554 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
557 gpie = rd32(hw, TXGBE_GPIOINTEN);
558 gpie |= TXGBE_GPIOBIT_6;
559 wr32(hw, TXGBE_GPIOINTEN, gpie);
560 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
/* dev_start callback: resets and restarts the MAC, sets up queue
 * interrupt vectors and MSI-X, initializes and starts the Rx/Tx units,
 * configures PHY/laser power and link speed, wires up LSC/macsec/rxq
 * interrupts, and finally re-enables interrupts and refreshes link state.
 * NOTE(review): several error-path lines (returns/gotos between the
 * visible statements) are missing from this chunk.
 */
564 * Configure device link speed and setup link.
565 * It returns 0 on success.
568 txgbe_dev_start(struct rte_eth_dev *dev)
570 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
571 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
572 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
573 uint32_t intr_vector = 0;
575 bool link_up = false, negotiate = 0;
577 uint32_t allowed_speeds = 0;
579 uint32_t *link_speeds;
581 PMD_INIT_FUNC_TRACE();
583 /* TXGBE devices don't support:
584 * - half duplex (checked afterwards for valid speeds)
585 * - fixed speed: TODO implement
587 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
589 "Invalid link_speeds for port %u, fix speed not supported",
594 /* Stop the link setup handler before resetting the HW. */
595 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
597 /* disable uio/vfio intr/eventfd mapping */
598 rte_intr_disable(intr_handle);
601 hw->adapter_stopped = 0;
604 /* reinitialize adapter
605 * this calls reset and start
607 hw->nb_rx_queues = dev->data->nb_rx_queues;
608 hw->nb_tx_queues = dev->data->nb_tx_queues;
609 status = txgbe_pf_reset_hw(hw);
612 hw->mac.start_hw(hw);
613 hw->mac.get_link_status = true;
615 txgbe_dev_phy_intr_setup(dev);
617 /* check and configure queue intr-vector mapping */
618 if ((rte_intr_cap_multiple(intr_handle) ||
619 !RTE_ETH_DEV_SRIOV(dev).active) &&
620 dev->data->dev_conf.intr_conf.rxq != 0) {
621 intr_vector = dev->data->nb_rx_queues;
622 if (rte_intr_efd_enable(intr_handle, intr_vector))
626 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
627 intr_handle->intr_vec =
628 rte_zmalloc("intr_vec",
629 dev->data->nb_rx_queues * sizeof(int), 0);
630 if (intr_handle->intr_vec == NULL) {
631 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
632 " intr_vec", dev->data->nb_rx_queues);
637 /* configure msix for sleep until rx interrupt */
638 txgbe_configure_msix(dev);
640 /* initialize transmission unit */
641 txgbe_dev_tx_init(dev);
643 /* This can fail when allocating mbufs for descriptor rings */
644 err = txgbe_dev_rx_init(dev);
646 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
650 err = txgbe_dev_rxtx_start(dev);
652 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
656 /* Skip link setup if loopback mode is enabled. */
657 if (hw->mac.type == txgbe_mac_raptor &&
658 dev->data->dev_conf.lpbk_mode)
659 goto skip_link_setup;
661 if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
662 err = hw->mac.setup_sfp(hw);
667 if (hw->phy.media_type == txgbe_media_type_copper) {
668 /* Turn on the copper */
669 hw->phy.set_phy_power(hw, true);
671 /* Turn on the laser */
672 hw->mac.enable_tx_laser(hw);
675 err = hw->mac.check_link(hw, &speed, &link_up, 0);
678 dev->data->dev_link.link_status = link_up;
680 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
684 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
687 link_speeds = &dev->data->dev_conf.link_speeds;
688 if (*link_speeds & ~allowed_speeds) {
689 PMD_INIT_LOG(ERR, "Invalid link setting");
/* Autoneg advertises all supported speeds; otherwise build the speed
 * mask from the user's explicit selections.
 */
694 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
695 speed = (TXGBE_LINK_SPEED_100M_FULL |
696 TXGBE_LINK_SPEED_1GB_FULL |
697 TXGBE_LINK_SPEED_10GB_FULL);
699 if (*link_speeds & ETH_LINK_SPEED_10G)
700 speed |= TXGBE_LINK_SPEED_10GB_FULL;
701 if (*link_speeds & ETH_LINK_SPEED_5G)
702 speed |= TXGBE_LINK_SPEED_5GB_FULL;
703 if (*link_speeds & ETH_LINK_SPEED_2_5G)
704 speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
705 if (*link_speeds & ETH_LINK_SPEED_1G)
706 speed |= TXGBE_LINK_SPEED_1GB_FULL;
707 if (*link_speeds & ETH_LINK_SPEED_100M)
708 speed |= TXGBE_LINK_SPEED_100M_FULL;
711 err = hw->mac.setup_link(hw, speed, link_up);
717 if (rte_intr_allow_others(intr_handle)) {
718 /* check if lsc interrupt is enabled */
719 if (dev->data->dev_conf.intr_conf.lsc != 0)
720 txgbe_dev_lsc_interrupt_setup(dev, TRUE);
722 txgbe_dev_lsc_interrupt_setup(dev, FALSE);
723 txgbe_dev_macsec_interrupt_setup(dev);
724 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
726 rte_intr_callback_unregister(intr_handle,
727 txgbe_dev_interrupt_handler, dev);
728 if (dev->data->dev_conf.intr_conf.lsc != 0)
729 PMD_INIT_LOG(INFO, "lsc won't enable because of"
730 " no intr multiplex");
733 /* check if rxq interrupt is enabled */
734 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
735 rte_intr_dp_is_en(intr_handle))
736 txgbe_dev_rxq_interrupt_setup(dev);
738 /* enable uio/vfio intr/eventfd mapping */
739 rte_intr_enable(intr_handle);
741 /* resume enabled intr since hw reset */
742 txgbe_enable_intr(dev);
745 * Update link status right before return, because it may
746 * start link configuration process in a separate thread.
748 txgbe_dev_link_update(dev, 0);
750 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
755 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
756 txgbe_dev_clear_queues(dev);
/* dev_stop callback: cancels the link alarm, masks interrupts, resets the
 * MAC, powers down the PHY/laser, tears down queues and interrupt-vector
 * state, and clears the recorded link status. Idempotent via the
 * adapter_stopped guard.
 */
761 * Stop device: disable rx and tx functions to allow for reconfiguring.
764 txgbe_dev_stop(struct rte_eth_dev *dev)
766 struct rte_eth_link link;
767 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
768 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
769 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
771 if (hw->adapter_stopped)
774 PMD_INIT_FUNC_TRACE();
776 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
778 /* disable interrupts */
779 txgbe_disable_intr(hw);
/* Reset the NIC; adapter_stopped is cleared here and set again at the
 * end once teardown has completed.
 */
782 txgbe_pf_reset_hw(hw);
783 hw->adapter_stopped = 0;
788 if (hw->phy.media_type == txgbe_media_type_copper) {
789 /* Turn off the copper */
790 hw->phy.set_phy_power(hw, false);
792 /* Turn off the laser */
793 hw->mac.disable_tx_laser(hw);
796 txgbe_dev_clear_queues(dev);
798 /* Clear stored conf */
799 dev->data->scattered_rx = 0;
802 /* Clear recorded link status */
803 memset(&link, 0, sizeof(link));
804 rte_eth_linkstatus_set(dev, &link);
806 if (!rte_intr_allow_others(intr_handle))
807 /* resume to the default handler */
808 rte_intr_callback_register(intr_handle,
809 txgbe_dev_interrupt_handler,
812 /* Clean datapath event and queue/vec mapping */
813 rte_intr_efd_disable(intr_handle);
814 if (intr_handle->intr_vec != NULL) {
815 rte_free(intr_handle->intr_vec);
816 intr_handle->intr_vec = NULL;
819 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
821 hw->adapter_stopped = true;
822 dev->data->dev_started = 0;
/* dev_set_link_up callback: power up the copper PHY or enable the Tx
 * laser (fiber), then refresh the recorded link status.
 */
828 * Set device link up: enable tx.
831 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
833 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
835 if (hw->phy.media_type == txgbe_media_type_copper) {
836 /* Turn on the copper */
837 hw->phy.set_phy_power(hw, true);
839 /* Turn on the laser */
840 hw->mac.enable_tx_laser(hw);
841 txgbe_dev_link_update(dev, 0);
/* dev_set_link_down callback: power down the copper PHY or disable the
 * Tx laser (fiber), then refresh the recorded link status.
 */
848 * Set device link down: disable tx.
851 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
853 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
855 if (hw->phy.media_type == txgbe_media_type_copper) {
856 /* Turn off the copper */
857 hw->phy.set_phy_power(hw, false);
859 /* Turn off the laser */
860 hw->mac.disable_tx_laser(hw);
861 txgbe_dev_link_update(dev, 0);
/* dev_close callback: reset the hardware, stop the port, free queues,
 * restore RAR[0], unregister the interrupt callback (retrying while the
 * handler is busy, up to ~10 + link-up-time attempts), cancel the delayed
 * interrupt alarm, and release the MAC address tables.
 */
868 * Reset and stop device.
871 txgbe_dev_close(struct rte_eth_dev *dev)
873 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
874 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
875 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
879 PMD_INIT_FUNC_TRACE();
881 txgbe_pf_reset_hw(hw);
883 ret = txgbe_dev_stop(dev);
885 txgbe_dev_free_queues(dev);
887 /* reprogram the RAR[0] in case user changed it. */
888 txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
890 /* disable uio intr before callback unregister */
891 rte_intr_disable(intr_handle);
/* Retry unregistration: -EAGAIN means the callback is mid-execution.
 * NOTE(review): "ret >= 0 || ret == -ENOENT" — the -ENOENT arm is
 * redundant given ret >= 0 is checked first only if ENOENT were
 * positive; presumably intended as the success/absent cases. Confirm.
 */
894 ret = rte_intr_callback_unregister(intr_handle,
895 txgbe_dev_interrupt_handler, dev);
896 if (ret >= 0 || ret == -ENOENT) {
898 } else if (ret != -EAGAIN) {
900 "intr callback unregister failed: %d",
904 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
906 /* cancel the delay handler before remove dev */
907 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
909 rte_free(dev->data->mac_addrs);
910 dev->data->mac_addrs = NULL;
912 rte_free(dev->data->hash_mac_addrs);
913 dev->data->hash_mac_addrs = NULL;
/* dev_reset callback: uninit followed by re-init of the port.
 * Refused while SR-IOV is active, because PF reset with live VFs would
 * need a PF->VF notification mechanism that is not implemented.
 */
922 txgbe_dev_reset(struct rte_eth_dev *dev)
926 /* When a DPDK PMD PF begin to reset PF port, it should notify all
927 * its VF to make them align with it. The detailed notification
928 * mechanism is PMD specific. As to txgbe PF, it is rather complex.
929 * To avoid unexpected behavior in VF, currently reset of PF with
930 * SR-IOV activation is not supported. It might be supported later.
932 if (dev->data->sriov.active)
935 ret = eth_txgbe_dev_uninit(dev);
939 ret = eth_txgbe_dev_init(dev, NULL);
/* dev_infos_get callback: report queue/MAC/VMDq limits, offload
 * capabilities, default ring thresholds, descriptor limits, RSS
 * parameters, supported link speeds, and driver-preferred defaults.
 */
945 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
947 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
948 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
950 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
951 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
952 dev_info->min_rx_bufsize = 1024;
953 dev_info->max_rx_pktlen = 15872;
954 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
955 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
956 dev_info->max_vfs = pci_dev->max_vfs;
957 dev_info->max_vmdq_pools = ETH_64_POOLS;
958 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
959 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
960 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
961 dev_info->rx_queue_offload_capa);
962 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
963 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
965 dev_info->default_rxconf = (struct rte_eth_rxconf) {
967 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
968 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
969 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
971 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
976 dev_info->default_txconf = (struct rte_eth_txconf) {
978 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
979 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
980 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
982 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
986 dev_info->rx_desc_lim = rx_desc_lim;
987 dev_info->tx_desc_lim = tx_desc_lim;
989 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
990 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
991 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
993 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
994 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
996 /* Driver-preferred Rx/Tx parameters */
997 dev_info->default_rxportconf.burst_size = 32;
998 dev_info->default_txportconf.burst_size = 32;
999 dev_info->default_rxportconf.nb_queues = 1;
1000 dev_info->default_txportconf.nb_queues = 1;
1001 dev_info->default_rxportconf.ring_size = 256;
1002 dev_info->default_txportconf.ring_size = 256;
/* dev_supported_ptypes_get callback: return the packet-type list only
 * when one of the known scalar Rx burst functions is in use (the fallback
 * return for other burst functions is not visible in this chunk).
 */
1008 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1010 if (dev->rx_pkt_burst == txgbe_recv_pkts ||
1011 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
1012 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
1013 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
1014 return txgbe_get_supported_ptypes();
/* EAL alarm callback scheduled from txgbe_dev_link_update_share():
 * performs deferred link configuration (query capabilities when no speed
 * is advertised, then set up the link) and clears the
 * NEED_LINK_CONFIG flag so subsequent link updates report real state.
 */
1020 txgbe_dev_setup_link_alarm_handler(void *param)
1022 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1023 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1024 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1026 bool autoneg = false;
1028 speed = hw->phy.autoneg_advertised;
1030 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
1032 hw->mac.setup_link(hw, speed, true);
1034 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
/* Shared link-update implementation: queries MAC link state, translates
 * the hardware speed code into rte_eth_link fields, and — for fiber media
 * with link down — schedules the deferred link-setup alarm instead of
 * blocking. Publishes the result via rte_eth_linkstatus_set().
 */
1037 /* return 0 means link status changed, -1 means not changed */
1039 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
1040 int wait_to_complete)
1042 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1043 struct rte_eth_link link;
1044 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
1045 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1050 memset(&link, 0, sizeof(link));
1051 link.link_status = ETH_LINK_DOWN;
1052 link.link_speed = ETH_SPEED_NUM_NONE;
1053 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1054 link.link_autoneg = ETH_LINK_AUTONEG;
1056 hw->mac.get_link_status = true;
/* Link configuration still pending: report "down" without touching HW */
1058 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
1059 return rte_eth_linkstatus_set(dev, &link);
1061 /* check if it needs to wait to complete, if lsc interrupt is enabled */
1062 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1065 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
/* NOTE(review): this arm appears to be the check_link error path
 * (reporting a placeholder 100M full-duplex link) — the guarding
 * condition is not visible in this chunk; confirm.
 */
1068 link.link_speed = ETH_SPEED_NUM_100M;
1069 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1070 return rte_eth_linkstatus_set(dev, &link);
1074 if (hw->phy.media_type == txgbe_media_type_fiber) {
1075 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
1076 rte_eal_alarm_set(10,
1077 txgbe_dev_setup_link_alarm_handler, dev);
1079 return rte_eth_linkstatus_set(dev, &link);
1082 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
1083 link.link_status = ETH_LINK_UP;
1084 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1086 switch (link_speed) {
1088 case TXGBE_LINK_SPEED_UNKNOWN:
1089 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1090 link.link_speed = ETH_SPEED_NUM_100M;
1093 case TXGBE_LINK_SPEED_100M_FULL:
1094 link.link_speed = ETH_SPEED_NUM_100M;
1097 case TXGBE_LINK_SPEED_1GB_FULL:
1098 link.link_speed = ETH_SPEED_NUM_1G;
1101 case TXGBE_LINK_SPEED_2_5GB_FULL:
1102 link.link_speed = ETH_SPEED_NUM_2_5G;
1105 case TXGBE_LINK_SPEED_5GB_FULL:
1106 link.link_speed = ETH_SPEED_NUM_5G;
1109 case TXGBE_LINK_SPEED_10GB_FULL:
1110 link.link_speed = ETH_SPEED_NUM_10G;
1114 return rte_eth_linkstatus_set(dev, &link);
/* link_update callback: thin wrapper over the shared implementation. */
1118 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1120 return txgbe_dev_link_update_share(dev, wait_to_complete);
/* Enable or disable the link-status-change (LSC) cause in the misc
 * interrupt mask; prints the current link status first. Takes effect at
 * the next txgbe_enable_intr().
 */
1124 * It clears the interrupt causes and enables the interrupt.
1125 * It will be called once only during nic initialized.
1128 * Pointer to struct rte_eth_dev.
1130 * Enable or Disable.
1133 * - On success, zero.
1134 * - On failure, a negative value.
1137 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1139 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1141 txgbe_dev_link_status_print(dev);
1143 intr->mask_misc |= TXGBE_ICRMISC_LSC;
1145 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
/* Enable Rx queue interrupts by setting both queue cause masks; takes
 * effect at the next txgbe_enable_intr().
 */
1151 * It clears the interrupt causes and enables the interrupt.
1152 * It will be called once only during nic initialized.
1155 * Pointer to struct rte_eth_dev.
1158 * - On success, zero.
1159 * - On failure, a negative value.
1162 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1164 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1166 intr->mask[0] |= TXGBE_ICR_MASK;
1167 intr->mask[1] |= TXGBE_ICR_MASK;
/* Enable the link-security (macsec) cause in the misc interrupt mask;
 * takes effect at the next txgbe_enable_intr().
 */
1173 * It clears the interrupt causes and enables the interrupt.
1174 * It will be called once only during nic initialized.
1177 * Pointer to struct rte_eth_dev.
1180 * - On success, zero.
1181 * - On failure, a negative value.
1184 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1186 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1188 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
/* Read the misc interrupt cause word from the DMA interrupt status block
 * (with all causes masked first) and translate each pending hardware
 * cause bit into the corresponding intr->flags software flag for
 * txgbe_dev_interrupt_action() to service.
 */
1194 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
1197 * Pointer to struct rte_eth_dev.
1200 * - On success, zero.
1201 * - On failure, a negative value.
1204 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1207 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1208 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1210 /* clear all cause mask */
1211 txgbe_disable_intr(hw);
1213 /* read-on-clear nic registers here */
1214 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
1215 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1219 /* set flag for async link update */
1220 if (eicr & TXGBE_ICRMISC_LSC)
1221 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1223 if (eicr & TXGBE_ICRMISC_VFMBX)
1224 intr->flags |= TXGBE_FLAG_MAILBOX;
1226 if (eicr & TXGBE_ICRMISC_LNKSEC)
1227 intr->flags |= TXGBE_FLAG_MACSEC;
1229 if (eicr & TXGBE_ICRMISC_GPIO)
1230 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
/* Log the cached link status (up + speed/duplex, or down) followed by the
 * device's PCI address. Reads only software state via
 * rte_eth_linkstatus_get(); does not touch hardware.
 */
1236 * It gets and then prints the link status.
1239 * Pointer to struct rte_eth_dev.
1242 * - On success, zero.
1243 * - On failure, a negative value.
1246 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
1248 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1249 struct rte_eth_link link;
1251 rte_eth_linkstatus_get(dev, &link);
1253 if (link.link_status) {
1254 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1255 (int)(dev->data->port_id),
1256 (unsigned int)link.link_speed,
1257 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1258 "full-duplex" : "half-duplex");
1260 PMD_INIT_LOG(INFO, " Port %d: Link Down",
1261 (int)(dev->data->port_id));
1263 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1264 pci_dev->addr.domain,
1266 pci_dev->addr.devid,
1267 pci_dev->addr.function);
/* Service the software interrupt flags set by
 * txgbe_dev_interrupt_get_status(): acknowledge mailbox, run the PHY
 * LASI handler, and on link change either handle it now or defer it via
 * an EAL alarm (1s after link-up, 4s after link-down) with the LSC cause
 * temporarily masked until the delayed handler restores it. Finally
 * re-enables interrupts.
 */
1271 * It executes link_update after knowing an interrupt occurred.
1274 * Pointer to struct rte_eth_dev.
1277 * - On success, zero.
1278 * - On failure, a negative value.
1281 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
1282 struct rte_intr_handle *intr_handle)
1284 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1286 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1288 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
1290 if (intr->flags & TXGBE_FLAG_MAILBOX)
1291 intr->flags &= ~TXGBE_FLAG_MAILBOX;
1293 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
1294 hw->phy.handle_lasi(hw);
1295 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
1298 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
1299 struct rte_eth_link link;
1301 /*get the link status before link update, for predicting later*/
1302 rte_eth_linkstatus_get(dev, &link);
1304 txgbe_dev_link_update(dev, 0);
/* Pick the settle delay from the *previous* status: a link that was
 * down is likely coming up (short wait); one that was up is likely
 * going down (long wait).
 */
1307 if (!link.link_status)
1308 /* handle it 1 sec later, wait it being stable */
1309 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
1310 /* likely to down */
1312 /* handle it 4 sec later, wait it being stable */
1313 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
1315 txgbe_dev_link_status_print(dev);
1316 if (rte_eal_alarm_set(timeout * 1000,
1317 txgbe_dev_interrupt_delayed_handler,
1319 PMD_DRV_LOG(ERR, "Error setting alarm");
1321 /* remember original mask */
1322 intr->mask_misc_orig = intr->mask_misc;
1323 /* only disable lsc interrupt */
1324 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
1328 PMD_DRV_LOG(DEBUG, "enable intr immediately");
1329 txgbe_enable_intr(dev);
1330 rte_intr_enable(intr_handle);
1336 * Interrupt handler which shall be registered for alarm callback for delayed
1337 * handling specific interrupt to wait for the stable nic state. As the
1338 * NIC interrupt state is not stable for txgbe after link is just down,
1339 * it needs to wait 4 seconds to get the stable status.
1342 * Pointer to interrupt handle.
1344 * The address of parameter (struct rte_eth_dev *) registered before.
1350 txgbe_dev_interrupt_delayed_handler(void *param)
1352 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1353 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1354 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1355 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1356 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1359 txgbe_disable_intr(hw);
1361 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
1363 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
1364 hw->phy.handle_lasi(hw);
1365 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
1368 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
1369 txgbe_dev_link_update(dev, 0);
1370 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
1371 txgbe_dev_link_status_print(dev);
1372 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
1376 if (intr->flags & TXGBE_FLAG_MACSEC) {
1377 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
1379 intr->flags &= ~TXGBE_FLAG_MACSEC;
1382 /* restore original mask */
1383 intr->mask_misc = intr->mask_misc_orig;
1384 intr->mask_misc_orig = 0;
1386 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
1387 txgbe_enable_intr(dev);
1388 rte_intr_enable(intr_handle);
1392 * Interrupt handler triggered by NIC for handling
1393 * specific interrupt.
1396 * Pointer to interrupt handle.
1398 * The address of parameter (struct rte_eth_dev *) registered before.
1404 txgbe_dev_interrupt_handler(void *param)
1406 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1408 txgbe_dev_interrupt_get_status(dev);
1409 txgbe_dev_interrupt_action(dev, dev->intr_handle);
1413 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1414 uint32_t index, uint32_t pool)
1416 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1417 uint32_t enable_addr = 1;
1419 return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
1424 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
1426 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1428 txgbe_clear_rar(hw, index);
1432 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1434 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1436 txgbe_remove_rar(dev, 0);
1437 txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
1443 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
1445 uint32_t vector = 0;
1447 switch (hw->mac.mc_filter_type) {
1448 case 0: /* use bits [47:36] of the address */
1449 vector = ((uc_addr->addr_bytes[4] >> 4) |
1450 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
1452 case 1: /* use bits [46:35] of the address */
1453 vector = ((uc_addr->addr_bytes[4] >> 3) |
1454 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
1456 case 2: /* use bits [45:34] of the address */
1457 vector = ((uc_addr->addr_bytes[4] >> 2) |
1458 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
1460 case 3: /* use bits [43:32] of the address */
1461 vector = ((uc_addr->addr_bytes[4]) |
1462 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
1464 default: /* Invalid mc_filter_type */
1468 /* vector can only be 12-bits or boundary will be exceeded */
1474 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
1475 struct rte_ether_addr *mac_addr, uint8_t on)
1483 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1484 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
1486 /* The UTA table only exists on pf hardware */
1487 if (hw->mac.type < txgbe_mac_raptor)
1490 vector = txgbe_uta_vector(hw, mac_addr);
1491 uta_idx = (vector >> 5) & 0x7F;
1492 uta_mask = 0x1UL << (vector & 0x1F);
1494 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
1497 reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
1499 uta_info->uta_in_use++;
1500 reg_val |= uta_mask;
1501 uta_info->uta_shadow[uta_idx] |= uta_mask;
1503 uta_info->uta_in_use--;
1504 reg_val &= ~uta_mask;
1505 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
1508 wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
1510 psrctl = rd32(hw, TXGBE_PSRCTL);
1511 if (uta_info->uta_in_use > 0)
1512 psrctl |= TXGBE_PSRCTL_UCHFENA;
1514 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
1516 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
1517 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
1518 wr32(hw, TXGBE_PSRCTL, psrctl);
1524 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
1526 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1527 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
1531 /* The UTA table only exists on pf hardware */
1532 if (hw->mac.type < txgbe_mac_raptor)
1536 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
1537 uta_info->uta_shadow[i] = ~0;
1538 wr32(hw, TXGBE_UCADDRTBL(i), ~0);
1541 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
1542 uta_info->uta_shadow[i] = 0;
1543 wr32(hw, TXGBE_UCADDRTBL(i), 0);
1547 psrctl = rd32(hw, TXGBE_PSRCTL);
1549 psrctl |= TXGBE_PSRCTL_UCHFENA;
1551 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
1553 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
1554 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
1555 wr32(hw, TXGBE_PSRCTL, psrctl);
1561 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1563 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1564 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1566 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1568 if (queue_id < 32) {
1569 mask = rd32(hw, TXGBE_IMS(0));
1570 mask &= (1 << queue_id);
1571 wr32(hw, TXGBE_IMS(0), mask);
1572 } else if (queue_id < 64) {
1573 mask = rd32(hw, TXGBE_IMS(1));
1574 mask &= (1 << (queue_id - 32));
1575 wr32(hw, TXGBE_IMS(1), mask);
1577 rte_intr_enable(intr_handle);
1583 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1586 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1588 if (queue_id < 32) {
1589 mask = rd32(hw, TXGBE_IMS(0));
1590 mask &= ~(1 << queue_id);
1591 wr32(hw, TXGBE_IMS(0), mask);
1592 } else if (queue_id < 64) {
1593 mask = rd32(hw, TXGBE_IMS(1));
1594 mask &= ~(1 << (queue_id - 32));
1595 wr32(hw, TXGBE_IMS(1), mask);
1602 * set the IVAR registers, mapping interrupt causes to vectors
1604 * pointer to txgbe_hw struct
1606 * 0 for Rx, 1 for Tx, -1 for other causes
1608 * queue to map the corresponding interrupt to
1610 * the vector to map to the corresponding queue
1613 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
1614 uint8_t queue, uint8_t msix_vector)
1618 if (direction == -1) {
1620 msix_vector |= TXGBE_IVARMISC_VLD;
1622 tmp = rd32(hw, TXGBE_IVARMISC);
1623 tmp &= ~(0xFF << idx);
1624 tmp |= (msix_vector << idx);
1625 wr32(hw, TXGBE_IVARMISC, tmp);
1627 /* rx or tx causes */
1628 /* Workround for ICR lost */
1629 idx = ((16 * (queue & 1)) + (8 * direction));
1630 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
1631 tmp &= ~(0xFF << idx);
1632 tmp |= (msix_vector << idx);
1633 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
1638 * Sets up the hardware to properly generate MSI-X interrupts
1640 * board private structure
1643 txgbe_configure_msix(struct rte_eth_dev *dev)
1645 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1646 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1647 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1648 uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
1649 uint32_t vec = TXGBE_MISC_VEC_ID;
1652 /* won't configure msix register if no mapping is done
1653 * between intr vector and event fd
1654 * but if misx has been enabled already, need to configure
1655 * auto clean, auto mask and throttling.
1657 gpie = rd32(hw, TXGBE_GPIE);
1658 if (!rte_intr_dp_is_en(intr_handle) &&
1659 !(gpie & TXGBE_GPIE_MSIX))
1662 if (rte_intr_allow_others(intr_handle)) {
1663 base = TXGBE_RX_VEC_START;
1667 /* setup GPIE for MSI-x mode */
1668 gpie = rd32(hw, TXGBE_GPIE);
1669 gpie |= TXGBE_GPIE_MSIX;
1670 wr32(hw, TXGBE_GPIE, gpie);
1672 /* Populate the IVAR table and set the ITR values to the
1673 * corresponding register.
1675 if (rte_intr_dp_is_en(intr_handle)) {
1676 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
1678 /* by default, 1:1 mapping */
1679 txgbe_set_ivar_map(hw, 0, queue_id, vec);
1680 intr_handle->intr_vec[queue_id] = vec;
1681 if (vec < base + intr_handle->nb_efd - 1)
1685 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1687 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
1688 TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
1693 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
1694 u8 **mc_addr_ptr, u32 *vmdq)
1699 mc_addr = *mc_addr_ptr;
1700 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
1705 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1706 struct rte_ether_addr *mc_addr_set,
1707 uint32_t nb_mc_addr)
1709 struct txgbe_hw *hw;
1712 hw = TXGBE_DEV_HW(dev);
1713 mc_addr_list = (u8 *)mc_addr_set;
1714 return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
1715 txgbe_dev_addr_list_itr, TRUE);
1718 static const struct eth_dev_ops txgbe_eth_dev_ops = {
1719 .dev_configure = txgbe_dev_configure,
1720 .dev_infos_get = txgbe_dev_info_get,
1721 .dev_start = txgbe_dev_start,
1722 .dev_stop = txgbe_dev_stop,
1723 .dev_set_link_up = txgbe_dev_set_link_up,
1724 .dev_set_link_down = txgbe_dev_set_link_down,
1725 .dev_close = txgbe_dev_close,
1726 .dev_reset = txgbe_dev_reset,
1727 .link_update = txgbe_dev_link_update,
1728 .dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
1729 .rx_queue_start = txgbe_dev_rx_queue_start,
1730 .rx_queue_stop = txgbe_dev_rx_queue_stop,
1731 .tx_queue_start = txgbe_dev_tx_queue_start,
1732 .tx_queue_stop = txgbe_dev_tx_queue_stop,
1733 .rx_queue_setup = txgbe_dev_rx_queue_setup,
1734 .rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable,
1735 .rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable,
1736 .rx_queue_release = txgbe_dev_rx_queue_release,
1737 .tx_queue_setup = txgbe_dev_tx_queue_setup,
1738 .tx_queue_release = txgbe_dev_tx_queue_release,
1739 .mac_addr_add = txgbe_add_rar,
1740 .mac_addr_remove = txgbe_remove_rar,
1741 .mac_addr_set = txgbe_set_default_mac_addr,
1742 .uc_hash_table_set = txgbe_uc_hash_table_set,
1743 .uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
1744 .set_mc_addr_list = txgbe_dev_set_mc_addr_list,
1747 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
1748 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
1749 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
1751 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
1752 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
1754 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
1755 RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
1757 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
1758 RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
1761 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
1762 RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);