1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_interrupts.h>
14 #include <rte_debug.h>
16 #include <rte_memory.h>
18 #include <rte_alarm.h>
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
25 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
26 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
27 static int txgbe_dev_close(struct rte_eth_dev *dev);
28 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
29 int wait_to_complete);
31 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
32 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
33 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
34 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
35 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
36 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
37 struct rte_intr_handle *handle);
38 static void txgbe_dev_interrupt_handler(void *param);
39 static void txgbe_dev_interrupt_delayed_handler(void *param);
40 static void txgbe_configure_msix(struct rte_eth_dev *dev);
43 * The set of PCI devices this driver supports
/* PCI ID table: the Wangxun RAPTOR/WX1820 SFP devices this PMD binds to.
 * Terminated by a zeroed sentinel entry, as required by the PCI bus driver.
 */
45 static const struct rte_pci_id pci_id_txgbe_map[] = {
46 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
47 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
48 { .vendor_id = 0, /* sentinel */ },
/* Rx descriptor ring limits advertised via txgbe_dev_info_get(). */
51 static const struct rte_eth_desc_lim rx_desc_lim = {
52 .nb_max = TXGBE_RING_DESC_MAX,
53 .nb_min = TXGBE_RING_DESC_MIN,
54 .nb_align = TXGBE_RXD_ALIGN,
/* Tx descriptor ring limits, including per-packet segment caps,
 * advertised via txgbe_dev_info_get().
 */
57 static const struct rte_eth_desc_lim tx_desc_lim = {
58 .nb_max = TXGBE_RING_DESC_MAX,
59 .nb_min = TXGBE_RING_DESC_MIN,
60 .nb_align = TXGBE_TXD_ALIGN,
61 .nb_seg_max = TXGBE_TX_MAX_SEG,
62 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
65 static const struct eth_dev_ops txgbe_eth_dev_ops;
/*
 * txgbe_is_sfp() - report whether the attached PHY is an SFP module.
 * Classifies hw->phy.type: each txgbe_phy_sfp_* case listed below is an
 * SFP variant. NOTE(review): this excerpt is truncated — the per-branch
 * return statements and closing brace are not visible here.
 */
68 txgbe_is_sfp(struct txgbe_hw *hw)
70 switch (hw->phy.type) {
71 case txgbe_phy_sfp_avago:
72 case txgbe_phy_sfp_ftl:
73 case txgbe_phy_sfp_intel:
74 case txgbe_phy_sfp_unknown:
75 case txgbe_phy_sfp_tyco_passive:
76 case txgbe_phy_sfp_unknown_passive:
/*
 * txgbe_pf_reset_hw() - reset the MAC as PF and announce completion.
 * Runs the MAC-level reset callback, then sets the RSTDONE bit in
 * PORTCTL so PF/VF mailbox operations can proceed afterwards.
 * An SFP-not-present status from reset is handled specially (branch
 * body not visible in this excerpt).
 */
84 txgbe_pf_reset_hw(struct txgbe_hw *hw)
89 status = hw->mac.reset_hw(hw);
91 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
92 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
93 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
94 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
97 if (status == TXGBE_ERR_SFP_NOT_PRESENT)
/*
 * txgbe_enable_intr() - unmask device interrupts.
 * Writes the driver's cached misc-interrupt mask to IENMISC, then clears
 * (unmasks) both queue interrupt mask banks via the IMC registers.
 */
103 txgbe_enable_intr(struct rte_eth_dev *dev)
105 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
106 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
108 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
109 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
110 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
/*
 * txgbe_disable_intr() - mask all device interrupts.
 * Disables every misc interrupt source (writes the inverted full mask to
 * IENMISC) and sets both queue interrupt mask banks via IMS, so no
 * interrupt can be delivered until txgbe_enable_intr() runs.
 */
115 txgbe_disable_intr(struct txgbe_hw *hw)
117 PMD_INIT_FUNC_TRACE();
119 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
120 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
121 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
/*
 * eth_txgbe_dev_init() - per-port init callback for rte_eth_dev_create().
 *
 * Primary process: maps BAR0, reserves a DMA zone for the interrupt
 * status block, initializes the shared base code / EEPROM / MAC,
 * allocates the RAR and hash MAC address tables, and registers + enables
 * the PCI interrupt callback.
 * Secondary process: only binds the Rx/Tx burst functions and returns.
 *
 * NOTE(review): this excerpt is truncated — several error-return lines
 * and closing braces are not visible here.
 */
126 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
128 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
129 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
130 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
131 const struct rte_memzone *mz;
135 PMD_INIT_FUNC_TRACE();
137 eth_dev->dev_ops = &txgbe_eth_dev_ops;
138 eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
139 eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
140 eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
143 * For secondary processes, we don't initialise any further as primary
144 * has already done this work. Only check we don't need a different
145 * RX and TX function.
147 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
148 struct txgbe_tx_queue *txq;
149 /* TX queue function in primary, set by last queue initialized
150 * Tx queue may not initialized by primary process
152 if (eth_dev->data->tx_queues) {
153 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
154 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
155 txgbe_set_tx_function(eth_dev, txq);
157 /* Use default TX function if we get here */
158 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
159 "Using default TX function.");
162 txgbe_set_rx_function(eth_dev);
167 rte_eth_copy_pci_info(eth_dev, pci_dev);
169 /* Vendor and Device ID need to be set before init of shared code */
170 hw->device_id = pci_dev->id.device_id;
171 hw->vendor_id = pci_dev->id.vendor_id;
172 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
173 hw->allow_unsupported_sfp = 1;
175 /* Reserve memory for interrupt status block */
176 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
177 16, TXGBE_ALIGN, SOCKET_ID_ANY);
181 hw->isb_dma = TMZ_PADDR(mz);
182 hw->isb_mem = TMZ_VADDR(mz);
184 /* Initialize the shared code (base driver) */
185 err = txgbe_init_shared_code(hw);
187 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
191 err = hw->rom.init_params(hw);
193 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
197 /* Make sure we have a good EEPROM before we read from it */
198 err = hw->rom.validate_checksum(hw, &csum);
200 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
204 err = hw->mac.init_hw(hw);
207 * Devices with copper phys will fail to initialise if txgbe_init_hw()
208 * is called too soon after the kernel driver unbinding/binding occurs.
209 * The failure occurs in txgbe_identify_phy() for all devices,
210 * but for non-copper devies, txgbe_identify_sfp_module() is
211 * also called. See txgbe_identify_phy(). The reason for the
212 * failure is not known, and only occuts when virtualisation features
213 * are disabled in the bios. A delay of 200ms was found to be enough by
214 * trial-and-error, and is doubled to be safe.
216 if (err && hw->phy.media_type == txgbe_media_type_copper) {
218 err = hw->mac.init_hw(hw);
221 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
224 if (err == TXGBE_ERR_EEPROM_VERSION) {
225 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
226 "LOM. Please be aware there may be issues associated "
227 "with your hardware.");
228 PMD_INIT_LOG(ERR, "If you are experiencing problems "
229 "please contact your hardware representative "
230 "who provided you with this hardware.");
231 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
232 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
235 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
239 /* disable interrupt */
240 txgbe_disable_intr(hw);
242 /* Allocate memory for storing MAC addresses */
243 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
244 hw->mac.num_rar_entries, 0);
245 if (eth_dev->data->mac_addrs == NULL) {
247 "Failed to allocate %u bytes needed to store "
249 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
253 /* Copy the permanent MAC address */
254 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
/* NOTE(review): "ð_dev" below looks like mojibake for "&eth_dev" —
 * confirm against the upstream source before building. */
255 ð_dev->data->mac_addrs[0]);
257 /* Allocate memory for storing hash filter MAC addresses */
258 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
259 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
260 if (eth_dev->data->hash_mac_addrs == NULL) {
262 "Failed to allocate %d bytes needed to store MAC addresses",
263 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
267 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
268 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
269 (int)hw->mac.type, (int)hw->phy.type,
270 (int)hw->phy.sfp_type);
272 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
273 (int)hw->mac.type, (int)hw->phy.type);
275 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
276 eth_dev->data->port_id, pci_dev->id.vendor_id,
277 pci_dev->id.device_id);
279 rte_intr_callback_register(intr_handle,
280 txgbe_dev_interrupt_handler, eth_dev);
282 /* enable uio/vfio intr/eventfd mapping */
283 rte_intr_enable(intr_handle);
285 /* enable support intr */
286 txgbe_enable_intr(eth_dev);
/*
 * eth_txgbe_dev_uninit() - per-port teardown callback.
 * Secondary processes skip teardown (primary owns the device); the
 * primary process delegates all cleanup to txgbe_dev_close().
 */
292 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
294 PMD_INIT_FUNC_TRACE();
296 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
299 txgbe_dev_close(eth_dev);
/*
 * eth_txgbe_pci_probe() - PCI probe: create the PF ethdev for this device.
 * Parses devargs when present (zeroing eth_da otherwise), creates the
 * ethdev with eth_txgbe_dev_init() as the init hook, then looks up the
 * allocated PF port. Representor ports are checked but not created here.
 * NOTE(review): "ð_da" below looks like mojibake for "&eth_da" —
 * confirm against the upstream source before building.
 */
305 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
306 struct rte_pci_device *pci_dev)
308 struct rte_eth_dev *pf_ethdev;
309 struct rte_eth_devargs eth_da;
312 if (pci_dev->device.devargs) {
313 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
318 memset(ð_da, 0, sizeof(eth_da));
321 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
322 sizeof(struct txgbe_adapter),
323 eth_dev_pci_specific_init, pci_dev,
324 eth_txgbe_dev_init, NULL);
326 if (retval || eth_da.nb_representor_ports < 1)
329 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
330 if (pf_ethdev == NULL)
/*
 * eth_txgbe_pci_remove() - PCI remove: destroy the ethdev for this device.
 * Looks the port up by PCI device name and destroys it with
 * eth_txgbe_dev_uninit() as the uninit hook.
 */
336 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
338 struct rte_eth_dev *ethdev;
340 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
344 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
/* PCI driver registration: requires BAR mapping and supports link state
 * change (LSC) interrupts.
 */
347 static struct rte_pci_driver rte_txgbe_pmd = {
348 .id_table = pci_id_txgbe_map,
349 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
350 RTE_PCI_DRV_INTR_LSC,
351 .probe = eth_txgbe_pci_probe,
352 .remove = eth_txgbe_pci_remove,
/*
 * txgbe_check_vf_rss_rxq_num() - validate the per-VF RSS Rx queue count
 * and size the SR-IOV pool layout accordingly.
 * Selects 64 or 32 pools based on nb_rx_q (selection conditions are not
 * fully visible in this excerpt), then derives queues-per-pool and the
 * PF's default queue index from the active pool count and max_vfs.
 */
356 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
358 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
363 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
366 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
372 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
373 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
374 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
375 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
/*
 * txgbe_check_mq_mode() - validate the configured Rx/Tx multi-queue modes.
 *
 * With SR-IOV active: rejects mq_modes that cannot coexist with VMDq
 * (DCB/DCB+RSS), normalizes the VMDQ_RSS queue count via
 * txgbe_check_vf_rss_rxq_num(), forces default schemes when nothing was
 * configured, and bounds nb_rx_q/nb_tx_q by queues-per-pool.
 * Without SR-IOV: checks pool counts for VMDq+DCB and TC counts (4 or 8)
 * for plain DCB, on both the Rx and Tx side.
 *
 * NOTE(review): break/return statements between cases are not visible in
 * this excerpt; branch fall-through cannot be judged from this text.
 */
380 txgbe_check_mq_mode(struct rte_eth_dev *dev)
382 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
383 uint16_t nb_rx_q = dev->data->nb_rx_queues;
384 uint16_t nb_tx_q = dev->data->nb_tx_queues;
386 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
387 /* check multi-queue mode */
388 switch (dev_conf->rxmode.mq_mode) {
389 case ETH_MQ_RX_VMDQ_DCB:
390 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
392 case ETH_MQ_RX_VMDQ_DCB_RSS:
393 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
394 PMD_INIT_LOG(ERR, "SRIOV active,"
395 " unsupported mq_mode rx %d.",
396 dev_conf->rxmode.mq_mode);
399 case ETH_MQ_RX_VMDQ_RSS:
400 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
401 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
402 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
403 PMD_INIT_LOG(ERR, "SRIOV is active,"
404 " invalid queue number"
405 " for VMDQ RSS, allowed"
406 " value are 1, 2 or 4.");
410 case ETH_MQ_RX_VMDQ_ONLY:
412 /* if nothing mq mode configure, use default scheme */
413 dev->data->dev_conf.rxmode.mq_mode =
416 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
417 /* SRIOV only works in VMDq enable mode */
418 PMD_INIT_LOG(ERR, "SRIOV is active,"
419 " wrong mq_mode rx %d.",
420 dev_conf->rxmode.mq_mode);
424 switch (dev_conf->txmode.mq_mode) {
425 case ETH_MQ_TX_VMDQ_DCB:
426 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
427 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
429 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
430 dev->data->dev_conf.txmode.mq_mode =
435 /* check valid queue number */
436 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
437 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
438 PMD_INIT_LOG(ERR, "SRIOV is active,"
439 " nb_rx_q=%d nb_tx_q=%d queue number"
440 " must be less than or equal to %d.",
442 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
446 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
447 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
451 /* check configuration for vmdb+dcb mode */
452 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
453 const struct rte_eth_vmdq_dcb_conf *conf;
455 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
456 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
457 TXGBE_VMDQ_DCB_NB_QUEUES);
460 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
461 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
462 conf->nb_queue_pools == ETH_32_POOLS)) {
463 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
464 " nb_queue_pools must be %d or %d.",
465 ETH_16_POOLS, ETH_32_POOLS);
469 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
470 const struct rte_eth_vmdq_dcb_tx_conf *conf;
472 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
473 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
474 TXGBE_VMDQ_DCB_NB_QUEUES);
477 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
478 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
479 conf->nb_queue_pools == ETH_32_POOLS)) {
480 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
481 " nb_queue_pools != %d and"
482 " nb_queue_pools != %d.",
483 ETH_16_POOLS, ETH_32_POOLS);
488 /* For DCB mode check our configuration before we go further */
489 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
490 const struct rte_eth_dcb_rx_conf *conf;
492 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
493 if (!(conf->nb_tcs == ETH_4_TCS ||
494 conf->nb_tcs == ETH_8_TCS)) {
495 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
496 " and nb_tcs != %d.",
497 ETH_4_TCS, ETH_8_TCS);
502 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
503 const struct rte_eth_dcb_tx_conf *conf;
505 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
506 if (!(conf->nb_tcs == ETH_4_TCS ||
507 conf->nb_tcs == ETH_8_TCS)) {
508 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
509 " and nb_tcs != %d.",
510 ETH_4_TCS, ETH_8_TCS);
/*
 * txgbe_dev_configure() - dev_ops->dev_configure callback.
 * Forces the RSS_HASH Rx offload on when an RSS mq_mode is requested,
 * validates the multi-queue configuration, flags a deferred link-status
 * update for txgbe_dev_start(), and re-arms the bulk-allocation Rx path
 * (per-queue setup may clear it again if preconditions fail).
 */
519 txgbe_dev_configure(struct rte_eth_dev *dev)
521 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
522 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
525 PMD_INIT_FUNC_TRACE();
527 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
528 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
530 /* multiple queue mode checking */
531 ret = txgbe_check_mq_mode(dev);
533 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
538 /* set flag to update link status after init */
539 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
542 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
543 * allocation Rx preconditions we will reset it.
545 adapter->rx_bulk_alloc_allowed = true;
/*
 * txgbe_dev_phy_intr_setup() - enable the PHY/SFP GPIO interrupt.
 * Sets GPIO bit 6 in the GPIO interrupt-enable register and adds the
 * GPIO source to the driver's cached misc interrupt mask (applied later
 * by txgbe_enable_intr()).
 */
551 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
553 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
554 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
557 gpie = rd32(hw, TXGBE_GPIOINTEN);
558 gpie |= TXGBE_GPIOBIT_6;
559 wr32(hw, TXGBE_GPIOINTEN, gpie);
560 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
564 * Configure device link speed and setup link.
565 * It returns 0 on success.
/*
 * txgbe_dev_start() - dev_ops->dev_start: bring the port up.
 *
 * Sequence: reject fixed-speed configs, cancel the link-setup alarm,
 * reset + restart the MAC, set up PHY/queue interrupt vectors and MSI-X,
 * initialize Tx then Rx units, start the queues, optionally skip link
 * setup in loopback mode, power the PHY/laser, program the negotiated
 * speed mask, wire up LSC/macsec/rxq interrupts, and finally refresh
 * link status and the LED control register.
 *
 * NOTE(review): this excerpt is truncated — error paths (goto error /
 * returns) and some closing braces are not visible here.
 */
568 txgbe_dev_start(struct rte_eth_dev *dev)
570 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
571 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
572 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
573 uint32_t intr_vector = 0;
575 bool link_up = false, negotiate = 0;
577 uint32_t allowed_speeds = 0;
579 uint32_t *link_speeds;
581 PMD_INIT_FUNC_TRACE();
583 /* TXGBE devices don't support:
584 * - half duplex (checked afterwards for valid speeds)
585 * - fixed speed: TODO implement
587 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
589 "Invalid link_speeds for port %u, fix speed not supported",
594 /* Stop the link setup handler before resetting the HW. */
595 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
597 /* disable uio/vfio intr/eventfd mapping */
598 rte_intr_disable(intr_handle);
601 hw->adapter_stopped = 0;
604 /* reinitialize adapter
605 * this calls reset and start
607 hw->nb_rx_queues = dev->data->nb_rx_queues;
608 hw->nb_tx_queues = dev->data->nb_tx_queues;
609 status = txgbe_pf_reset_hw(hw);
612 hw->mac.start_hw(hw);
613 hw->mac.get_link_status = true;
615 txgbe_dev_phy_intr_setup(dev);
617 /* check and configure queue intr-vector mapping */
618 if ((rte_intr_cap_multiple(intr_handle) ||
619 !RTE_ETH_DEV_SRIOV(dev).active) &&
620 dev->data->dev_conf.intr_conf.rxq != 0) {
621 intr_vector = dev->data->nb_rx_queues;
622 if (rte_intr_efd_enable(intr_handle, intr_vector))
626 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
627 intr_handle->intr_vec =
628 rte_zmalloc("intr_vec",
629 dev->data->nb_rx_queues * sizeof(int), 0);
630 if (intr_handle->intr_vec == NULL) {
631 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
632 " intr_vec", dev->data->nb_rx_queues);
637 /* confiugre msix for sleep until rx interrupt */
638 txgbe_configure_msix(dev);
640 /* initialize transmission unit */
641 txgbe_dev_tx_init(dev);
643 /* This can fail when allocating mbufs for descriptor rings */
644 err = txgbe_dev_rx_init(dev);
646 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
650 err = txgbe_dev_rxtx_start(dev);
652 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
656 /* Skip link setup if loopback mode is enabled. */
657 if (hw->mac.type == txgbe_mac_raptor &&
658 dev->data->dev_conf.lpbk_mode)
659 goto skip_link_setup;
661 if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
662 err = hw->mac.setup_sfp(hw);
667 if (hw->phy.media_type == txgbe_media_type_copper) {
668 /* Turn on the copper */
669 hw->phy.set_phy_power(hw, true);
671 /* Turn on the laser */
672 hw->mac.enable_tx_laser(hw);
675 err = hw->mac.check_link(hw, &speed, &link_up, 0);
678 dev->data->dev_link.link_status = link_up;
680 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
684 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
687 link_speeds = &dev->data->dev_conf.link_speeds;
688 if (*link_speeds & ~allowed_speeds) {
689 PMD_INIT_LOG(ERR, "Invalid link setting");
694 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
695 speed = (TXGBE_LINK_SPEED_100M_FULL |
696 TXGBE_LINK_SPEED_1GB_FULL |
697 TXGBE_LINK_SPEED_10GB_FULL);
699 if (*link_speeds & ETH_LINK_SPEED_10G)
700 speed |= TXGBE_LINK_SPEED_10GB_FULL;
701 if (*link_speeds & ETH_LINK_SPEED_5G)
702 speed |= TXGBE_LINK_SPEED_5GB_FULL;
703 if (*link_speeds & ETH_LINK_SPEED_2_5G)
704 speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
705 if (*link_speeds & ETH_LINK_SPEED_1G)
706 speed |= TXGBE_LINK_SPEED_1GB_FULL;
707 if (*link_speeds & ETH_LINK_SPEED_100M)
708 speed |= TXGBE_LINK_SPEED_100M_FULL;
711 err = hw->mac.setup_link(hw, speed, link_up);
717 if (rte_intr_allow_others(intr_handle)) {
718 /* check if lsc interrupt is enabled */
719 if (dev->data->dev_conf.intr_conf.lsc != 0)
720 txgbe_dev_lsc_interrupt_setup(dev, TRUE);
722 txgbe_dev_lsc_interrupt_setup(dev, FALSE);
723 txgbe_dev_macsec_interrupt_setup(dev);
724 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
726 rte_intr_callback_unregister(intr_handle,
727 txgbe_dev_interrupt_handler, dev);
728 if (dev->data->dev_conf.intr_conf.lsc != 0)
729 PMD_INIT_LOG(INFO, "lsc won't enable because of"
730 " no intr multiplex");
733 /* check if rxq interrupt is enabled */
734 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
735 rte_intr_dp_is_en(intr_handle))
736 txgbe_dev_rxq_interrupt_setup(dev);
738 /* enable uio/vfio intr/eventfd mapping */
739 rte_intr_enable(intr_handle);
741 /* resume enabled intr since hw reset */
742 txgbe_enable_intr(dev);
745 * Update link status right before return, because it may
746 * start link configuration process in a separate thread.
748 txgbe_dev_link_update(dev, 0);
750 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
755 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
756 txgbe_dev_clear_queues(dev);
761 * Set device link up: enable tx.
/*
 * txgbe_dev_set_link_up() - dev_ops->dev_set_link_up: enable Tx.
 * Copper media: power the PHY on. Fiber (else branch, brace not visible
 * in this excerpt): enable the Tx laser. Then refresh link status.
 */
764 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
766 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
768 if (hw->phy.media_type == txgbe_media_type_copper) {
769 /* Turn on the copper */
770 hw->phy.set_phy_power(hw, true);
772 /* Turn on the laser */
773 hw->mac.enable_tx_laser(hw);
774 txgbe_dev_link_update(dev, 0);
781 * Set device link down: disable tx.
/*
 * txgbe_dev_set_link_down() - dev_ops->dev_set_link_down: disable Tx.
 * Mirror of txgbe_dev_set_link_up(): power the copper PHY off or disable
 * the Tx laser, then refresh link status.
 */
784 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
786 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
788 if (hw->phy.media_type == txgbe_media_type_copper) {
789 /* Turn off the copper */
790 hw->phy.set_phy_power(hw, false);
792 /* Turn off the laser */
793 hw->mac.disable_tx_laser(hw);
794 txgbe_dev_link_update(dev, 0);
801 * Reset and stop device.
/*
 * txgbe_dev_close() - dev_ops->dev_close: reset and release the port.
 * Frees queues, disables the PCI interrupt, retries unregistering the
 * interrupt callback (it can return -EAGAIN while a callback is in
 * flight), cancels the delayed interrupt alarm, and frees the MAC
 * address tables allocated in eth_txgbe_dev_init().
 */
804 txgbe_dev_close(struct rte_eth_dev *dev)
806 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
807 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
811 PMD_INIT_FUNC_TRACE();
813 txgbe_dev_free_queues(dev);
815 /* disable uio intr before callback unregister */
816 rte_intr_disable(intr_handle);
819 ret = rte_intr_callback_unregister(intr_handle,
820 txgbe_dev_interrupt_handler, dev);
821 if (ret >= 0 || ret == -ENOENT) {
823 } else if (ret != -EAGAIN) {
825 "intr callback unregister failed: %d",
829 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
831 /* cancel the delay handler before remove dev */
832 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
834 rte_free(dev->data->mac_addrs);
835 dev->data->mac_addrs = NULL;
837 rte_free(dev->data->hash_mac_addrs);
838 dev->data->hash_mac_addrs = NULL;
/*
 * txgbe_dev_info_get() - dev_ops->dev_infos_get: report device capabilities.
 * Fills queue/MAC/VMDq limits from the hw struct, offload capabilities
 * from the rxtx helpers, default ring thresholds, descriptor limits
 * (rx_desc_lim/tx_desc_lim above), RSS parameters, supported speeds
 * (100M/1G/10G), and driver-preferred burst/ring defaults.
 */
844 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
846 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
847 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
849 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
850 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
851 dev_info->min_rx_bufsize = 1024;
852 dev_info->max_rx_pktlen = 15872;
853 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
854 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
855 dev_info->max_vfs = pci_dev->max_vfs;
856 dev_info->max_vmdq_pools = ETH_64_POOLS;
857 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
858 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
859 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
860 dev_info->rx_queue_offload_capa);
861 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
862 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
864 dev_info->default_rxconf = (struct rte_eth_rxconf) {
866 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
867 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
868 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
870 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
875 dev_info->default_txconf = (struct rte_eth_txconf) {
877 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
878 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
879 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
881 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
885 dev_info->rx_desc_lim = rx_desc_lim;
886 dev_info->tx_desc_lim = tx_desc_lim;
888 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
889 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
890 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
892 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
893 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
895 /* Driver-preferred Rx/Tx parameters */
896 dev_info->default_rxportconf.burst_size = 32;
897 dev_info->default_txportconf.burst_size = 32;
898 dev_info->default_rxportconf.nb_queues = 1;
899 dev_info->default_txportconf.nb_queues = 1;
900 dev_info->default_rxportconf.ring_size = 256;
901 dev_info->default_txportconf.ring_size = 256;
/*
 * txgbe_dev_supported_ptypes_get() - dev_ops->dev_supported_ptypes_get.
 * Returns the full ptype list only when one of the known scalar Rx burst
 * functions is active; the fallback return is not visible in this excerpt.
 */
907 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
909 if (dev->rx_pkt_burst == txgbe_recv_pkts ||
910 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
911 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
912 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
913 return txgbe_get_supported_ptypes();
/*
 * txgbe_dev_setup_link_alarm_handler() - EAL alarm callback that finishes
 * deferred link configuration (armed by txgbe_dev_link_update_share()).
 * Uses the advertised autoneg speed, falling back to the MAC's reported
 * capabilities, sets up the link, and clears NEED_LINK_CONFIG.
 */
919 txgbe_dev_setup_link_alarm_handler(void *param)
921 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
922 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
923 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
925 bool autoneg = false;
927 speed = hw->phy.autoneg_advertised;
929 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
931 hw->mac.setup_link(hw, speed, true);
933 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
936 /* return 0 means link status changed, -1 means not changed */
/* return 0 means link status changed, -1 means not changed */
/*
 * txgbe_dev_link_update_share() - common link-status poll for PF/VF.
 * Starts from a link-down default, short-circuits while deferred link
 * config is pending, queries the MAC (waiting only when requested and
 * LSC is disabled), schedules the link-setup alarm for fiber when the
 * link is down, and maps the hardware speed code to an ethdev speed.
 */
938 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
939 int wait_to_complete)
941 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
942 struct rte_eth_link link;
943 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
944 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
949 memset(&link, 0, sizeof(link));
950 link.link_status = ETH_LINK_DOWN;
951 link.link_speed = ETH_SPEED_NUM_NONE;
952 link.link_duplex = ETH_LINK_HALF_DUPLEX;
953 link.link_autoneg = ETH_LINK_AUTONEG;
955 hw->mac.get_link_status = true;
957 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
958 return rte_eth_linkstatus_set(dev, &link);
960 /* check if it needs to wait to complete, if lsc interrupt is enabled */
961 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
964 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
967 link.link_speed = ETH_SPEED_NUM_100M;
968 link.link_duplex = ETH_LINK_FULL_DUPLEX;
969 return rte_eth_linkstatus_set(dev, &link);
973 if (hw->phy.media_type == txgbe_media_type_fiber) {
974 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
975 rte_eal_alarm_set(10,
976 txgbe_dev_setup_link_alarm_handler, dev);
978 return rte_eth_linkstatus_set(dev, &link);
981 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
982 link.link_status = ETH_LINK_UP;
983 link.link_duplex = ETH_LINK_FULL_DUPLEX;
985 switch (link_speed) {
987 case TXGBE_LINK_SPEED_UNKNOWN:
988 link.link_duplex = ETH_LINK_FULL_DUPLEX;
989 link.link_speed = ETH_SPEED_NUM_100M;
992 case TXGBE_LINK_SPEED_100M_FULL:
993 link.link_speed = ETH_SPEED_NUM_100M;
996 case TXGBE_LINK_SPEED_1GB_FULL:
997 link.link_speed = ETH_SPEED_NUM_1G;
1000 case TXGBE_LINK_SPEED_2_5GB_FULL:
1001 link.link_speed = ETH_SPEED_NUM_2_5G;
1004 case TXGBE_LINK_SPEED_5GB_FULL:
1005 link.link_speed = ETH_SPEED_NUM_5G;
1008 case TXGBE_LINK_SPEED_10GB_FULL:
1009 link.link_speed = ETH_SPEED_NUM_10G;
1013 return rte_eth_linkstatus_set(dev, &link);
/* dev_ops->link_update: thin wrapper over the shared implementation. */
1017 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1019 return txgbe_dev_link_update_share(dev, wait_to_complete);
1023 * It clears the interrupt causes and enables the interrupt.
1024 * It will be called once only during nic initialized.
1027 * Pointer to struct rte_eth_dev.
1029 * Enable or Disable.
1032 * - On success, zero.
1033 * - On failure, a negative value.
/*
 * txgbe_dev_lsc_interrupt_setup() - enable/disable the link-state-change
 * interrupt source in the cached misc mask (applied by txgbe_enable_intr()).
 * Prints the current link status first. @on selects set vs clear of the
 * LSC bit; the if/else around the two mask lines is not visible here.
 */
1036 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1038 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1040 txgbe_dev_link_status_print(dev);
1042 intr->mask_misc |= TXGBE_ICRMISC_LSC;
1044 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
1050 * It clears the interrupt causes and enables the interrupt.
1051 * It will be called once only during nic initialized.
1054 * Pointer to struct rte_eth_dev.
1057 * - On success, zero.
1058 * - On failure, a negative value.
/*
 * txgbe_dev_rxq_interrupt_setup() - enable all Rx queue interrupt sources
 * in both banks of the cached queue interrupt mask.
 */
1061 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1063 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1065 intr->mask[0] |= TXGBE_ICR_MASK;
1066 intr->mask[1] |= TXGBE_ICR_MASK;
1072 * It clears the interrupt causes and enables the interrupt.
1073 * It will be called once only during nic initialized.
1076 * Pointer to struct rte_eth_dev.
1079 * - On success, zero.
1080 * - On failure, a negative value.
/*
 * txgbe_dev_macsec_interrupt_setup() - enable the link-security (macsec)
 * interrupt source in the cached misc mask.
 */
1083 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1085 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1087 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
1093 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
1096 * Pointer to struct rte_eth_dev.
1099 * - On success, zero.
1100 * - On failure, a negative value.
/*
 * txgbe_dev_interrupt_get_status() - read the misc interrupt cause word.
 * Masks all interrupts first, reads the misc cause from the DMA-backed
 * interrupt status block (isb_mem), and translates cause bits into
 * driver flags (LSC / VF mailbox / macsec / GPIO) for
 * txgbe_dev_interrupt_action() to act on.
 */
1103 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1106 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1107 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1109 /* clear all cause mask */
1110 txgbe_disable_intr(hw);
1112 /* read-on-clear nic registers here */
1113 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
1114 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1118 /* set flag for async link update */
1119 if (eicr & TXGBE_ICRMISC_LSC)
1120 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1122 if (eicr & TXGBE_ICRMISC_VFMBX)
1123 intr->flags |= TXGBE_FLAG_MAILBOX;
1125 if (eicr & TXGBE_ICRMISC_LNKSEC)
1126 intr->flags |= TXGBE_FLAG_MACSEC;
1128 if (eicr & TXGBE_ICRMISC_GPIO)
1129 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
1135 * It gets and then prints the link status.
1138 * Pointer to struct rte_eth_dev.
1141 * - On success, zero.
1142 * - On failure, a negative value.
/*
 * txgbe_dev_link_status_print() - log the current link state.
 * Reads the cached ethdev link status and logs speed/duplex (or "Link
 * Down") plus the port's PCI address.
 */
1145 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
1147 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1148 struct rte_eth_link link;
1150 rte_eth_linkstatus_get(dev, &link);
1152 if (link.link_status) {
1153 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1154 (int)(dev->data->port_id),
1155 (unsigned int)link.link_speed,
1156 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1157 "full-duplex" : "half-duplex");
1159 PMD_INIT_LOG(INFO, " Port %d: Link Down",
1160 (int)(dev->data->port_id));
1162 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1163 pci_dev->addr.domain,
1165 pci_dev->addr.devid,
1166 pci_dev->addr.function);
1170 * It executes link_update after knowing an interrupt occurred.
1173 * Pointer to struct rte_eth_dev.
1176 * - On success, zero.
1177 * - On failure, a negative value.
/*
 * txgbe_dev_interrupt_action() - act on the flags collected by
 * txgbe_dev_interrupt_get_status().
 * Handles PHY (LASI) events immediately. For link changes, it updates
 * the link and defers the user notification via an alarm — 1s when the
 * link was down (likely coming up), 4s when it was up (likely going
 * down) — temporarily masking LSC so the alarm isn't re-triggered.
 * Finally re-enables interrupts.
 */
1180 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
1181 struct rte_intr_handle *intr_handle)
1183 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1185 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1187 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
1189 if (intr->flags & TXGBE_FLAG_MAILBOX)
1190 intr->flags &= ~TXGBE_FLAG_MAILBOX;
1192 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
1193 hw->phy.handle_lasi(hw);
1194 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
1197 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
1198 struct rte_eth_link link;
1200 /*get the link status before link update, for predicting later*/
1201 rte_eth_linkstatus_get(dev, &link);
1203 txgbe_dev_link_update(dev, 0);
1206 if (!link.link_status)
1207 /* handle it 1 sec later, wait it being stable */
1208 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
1209 /* likely to down */
1211 /* handle it 4 sec later, wait it being stable */
1212 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
1214 txgbe_dev_link_status_print(dev);
1215 if (rte_eal_alarm_set(timeout * 1000,
1216 txgbe_dev_interrupt_delayed_handler,
1218 PMD_DRV_LOG(ERR, "Error setting alarm");
1220 /* remember original mask */
1221 intr->mask_misc_orig = intr->mask_misc;
1222 /* only disable lsc interrupt */
1223 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
1227 PMD_DRV_LOG(DEBUG, "enable intr immediately");
1228 txgbe_enable_intr(dev);
1229 rte_intr_enable(intr_handle);
1235 * Interrupt handler which shall be registered for alarm callback for delayed
1236 * handling specific interrupt to wait for the stable nic state. As the
1237 * NIC interrupt state is not stable for txgbe after link is just down,
1238 * it needs to wait 4 seconds to get the stable status.
1241 * Pointer to interrupt handle.
1243 * The address of parameter (struct rte_eth_dev *) registered before.
/*
 * txgbe_dev_interrupt_delayed_handler() - alarm callback armed by
 * txgbe_dev_interrupt_action() once the NIC state has had time to settle.
 * Re-reads the misc cause, services PHY events, performs the deferred
 * link update and fires the LSC (and macsec) user callbacks, restores
 * the misc mask saved before LSC was masked, and re-enables interrupts.
 */
1249 txgbe_dev_interrupt_delayed_handler(void *param)
1251 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1252 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1253 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1254 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1255 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1258 txgbe_disable_intr(hw);
1260 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
1262 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
1263 hw->phy.handle_lasi(hw);
1264 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
1267 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
1268 txgbe_dev_link_update(dev, 0);
1269 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
1270 txgbe_dev_link_status_print(dev);
1271 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
1275 if (intr->flags & TXGBE_FLAG_MACSEC) {
1276 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
1278 intr->flags &= ~TXGBE_FLAG_MACSEC;
1281 /* restore original mask */
1282 intr->mask_misc = intr->mask_misc_orig;
1283 intr->mask_misc_orig = 0;
1285 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
1286 txgbe_enable_intr(dev);
1287 rte_intr_enable(intr_handle);
/*
 * Interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
1303 txgbe_dev_interrupt_handler(void *param)
1305 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1307 txgbe_dev_interrupt_get_status(dev);
1308 txgbe_dev_interrupt_action(dev, dev->intr_handle);
1312 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1313 uint32_t index, uint32_t pool)
1315 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1316 uint32_t enable_addr = 1;
1318 return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
1323 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
1325 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1327 txgbe_clear_rar(hw, index);
1331 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1333 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1335 txgbe_remove_rar(dev, 0);
1336 txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
1342 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
1344 uint32_t vector = 0;
1346 switch (hw->mac.mc_filter_type) {
1347 case 0: /* use bits [47:36] of the address */
1348 vector = ((uc_addr->addr_bytes[4] >> 4) |
1349 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
1351 case 1: /* use bits [46:35] of the address */
1352 vector = ((uc_addr->addr_bytes[4] >> 3) |
1353 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
1355 case 2: /* use bits [45:34] of the address */
1356 vector = ((uc_addr->addr_bytes[4] >> 2) |
1357 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
1359 case 3: /* use bits [43:32] of the address */
1360 vector = ((uc_addr->addr_bytes[4]) |
1361 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
1363 default: /* Invalid mc_filter_type */
1367 /* vector can only be 12-bits or boundary will be exceeded */
1373 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
1374 struct rte_ether_addr *mac_addr, uint8_t on)
1382 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1383 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
1385 /* The UTA table only exists on pf hardware */
1386 if (hw->mac.type < txgbe_mac_raptor)
1389 vector = txgbe_uta_vector(hw, mac_addr);
1390 uta_idx = (vector >> 5) & 0x7F;
1391 uta_mask = 0x1UL << (vector & 0x1F);
1393 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
1396 reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
1398 uta_info->uta_in_use++;
1399 reg_val |= uta_mask;
1400 uta_info->uta_shadow[uta_idx] |= uta_mask;
1402 uta_info->uta_in_use--;
1403 reg_val &= ~uta_mask;
1404 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
1407 wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
1409 psrctl = rd32(hw, TXGBE_PSRCTL);
1410 if (uta_info->uta_in_use > 0)
1411 psrctl |= TXGBE_PSRCTL_UCHFENA;
1413 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
1415 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
1416 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
1417 wr32(hw, TXGBE_PSRCTL, psrctl);
1423 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
1425 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1426 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
1430 /* The UTA table only exists on pf hardware */
1431 if (hw->mac.type < txgbe_mac_raptor)
1435 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
1436 uta_info->uta_shadow[i] = ~0;
1437 wr32(hw, TXGBE_UCADDRTBL(i), ~0);
1440 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
1441 uta_info->uta_shadow[i] = 0;
1442 wr32(hw, TXGBE_UCADDRTBL(i), 0);
1446 psrctl = rd32(hw, TXGBE_PSRCTL);
1448 psrctl |= TXGBE_PSRCTL_UCHFENA;
1450 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
1452 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
1453 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
1454 wr32(hw, TXGBE_PSRCTL, psrctl);
/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
1471 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
1472 uint8_t queue, uint8_t msix_vector)
1476 if (direction == -1) {
1478 msix_vector |= TXGBE_IVARMISC_VLD;
1480 tmp = rd32(hw, TXGBE_IVARMISC);
1481 tmp &= ~(0xFF << idx);
1482 tmp |= (msix_vector << idx);
1483 wr32(hw, TXGBE_IVARMISC, tmp);
1485 /* rx or tx causes */
1486 /* Workround for ICR lost */
1487 idx = ((16 * (queue & 1)) + (8 * direction));
1488 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
1489 tmp &= ~(0xFF << idx);
1490 tmp |= (msix_vector << idx);
1491 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @hw
 *  board private structure
 */
1501 txgbe_configure_msix(struct rte_eth_dev *dev)
1503 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1504 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1505 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1506 uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
1507 uint32_t vec = TXGBE_MISC_VEC_ID;
1510 /* won't configure msix register if no mapping is done
1511 * between intr vector and event fd
1512 * but if misx has been enabled already, need to configure
1513 * auto clean, auto mask and throttling.
1515 gpie = rd32(hw, TXGBE_GPIE);
1516 if (!rte_intr_dp_is_en(intr_handle) &&
1517 !(gpie & TXGBE_GPIE_MSIX))
1520 if (rte_intr_allow_others(intr_handle)) {
1521 base = TXGBE_RX_VEC_START;
1525 /* setup GPIE for MSI-x mode */
1526 gpie = rd32(hw, TXGBE_GPIE);
1527 gpie |= TXGBE_GPIE_MSIX;
1528 wr32(hw, TXGBE_GPIE, gpie);
1530 /* Populate the IVAR table and set the ITR values to the
1531 * corresponding register.
1533 if (rte_intr_dp_is_en(intr_handle)) {
1534 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
1536 /* by default, 1:1 mapping */
1537 txgbe_set_ivar_map(hw, 0, queue_id, vec);
1538 intr_handle->intr_vec[queue_id] = vec;
1539 if (vec < base + intr_handle->nb_efd - 1)
1543 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1545 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
1546 TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
1551 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
1552 u8 **mc_addr_ptr, u32 *vmdq)
1557 mc_addr = *mc_addr_ptr;
1558 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
1563 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1564 struct rte_ether_addr *mc_addr_set,
1565 uint32_t nb_mc_addr)
1567 struct txgbe_hw *hw;
1570 hw = TXGBE_DEV_HW(dev);
1571 mc_addr_list = (u8 *)mc_addr_set;
1572 return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
1573 txgbe_dev_addr_list_itr, TRUE);
1576 static const struct eth_dev_ops txgbe_eth_dev_ops = {
1577 .dev_configure = txgbe_dev_configure,
1578 .dev_infos_get = txgbe_dev_info_get,
1579 .dev_start = txgbe_dev_start,
1580 .dev_set_link_up = txgbe_dev_set_link_up,
1581 .dev_set_link_down = txgbe_dev_set_link_down,
1582 .dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
1583 .rx_queue_start = txgbe_dev_rx_queue_start,
1584 .rx_queue_stop = txgbe_dev_rx_queue_stop,
1585 .tx_queue_start = txgbe_dev_tx_queue_start,
1586 .tx_queue_stop = txgbe_dev_tx_queue_stop,
1587 .rx_queue_setup = txgbe_dev_rx_queue_setup,
1588 .rx_queue_release = txgbe_dev_rx_queue_release,
1589 .tx_queue_setup = txgbe_dev_tx_queue_setup,
1590 .tx_queue_release = txgbe_dev_tx_queue_release,
1591 .mac_addr_add = txgbe_add_rar,
1592 .mac_addr_remove = txgbe_remove_rar,
1593 .mac_addr_set = txgbe_set_default_mac_addr,
1594 .uc_hash_table_set = txgbe_uc_hash_table_set,
1595 .uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
1596 .set_mc_addr_list = txgbe_dev_set_mc_addr_list,
1599 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
1600 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
1601 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
1603 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
1604 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
1606 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
1607 RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
1609 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
1610 RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
1613 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
1614 RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);