1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_interrupts.h>
14 #include <rte_memory.h>
16 #include <rte_alarm.h>
18 #include "txgbe_logs.h"
19 #include "base/txgbe.h"
20 #include "txgbe_ethdev.h"
21 #include "txgbe_rxtx.h"
/* Forward declarations for file-local helpers defined later in this file. */
23 static int txgbe_dev_close(struct rte_eth_dev *dev);
25 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
26 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
27 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
28 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
29 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
30 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
31 struct rte_intr_handle *handle);
32 static void txgbe_dev_interrupt_handler(void *param);
33 static void txgbe_dev_interrupt_delayed_handler(void *param);
34 static void txgbe_configure_msix(struct rte_eth_dev *dev);
37 * The set of PCI devices this driver supports
/* Vendor/device ID match table consumed by the PCI bus scan; the
 * zeroed entry terminates the list.
 * NOTE(review): closing "};" of this array is not visible in this chunk.
 */
39 static const struct rte_pci_id pci_id_txgbe_map[] = {
40 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
41 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
42 { .vendor_id = 0, /* sentinel */ },
/* Rx descriptor ring constraints reported via txgbe_dev_info_get(). */
45 static const struct rte_eth_desc_lim rx_desc_lim = {
46 .nb_max = TXGBE_RING_DESC_MAX,
47 .nb_min = TXGBE_RING_DESC_MIN,
48 .nb_align = TXGBE_RXD_ALIGN,
/* Tx descriptor ring constraints, including per-packet segment limits. */
51 static const struct rte_eth_desc_lim tx_desc_lim = {
52 .nb_max = TXGBE_RING_DESC_MAX,
53 .nb_min = TXGBE_RING_DESC_MIN,
54 .nb_align = TXGBE_TXD_ALIGN,
55 .nb_seg_max = TXGBE_TX_MAX_SEG,
56 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
/* Ops table is defined at the bottom of the file; declared here so
 * eth_txgbe_dev_init() can reference it.
 */
59 static const struct eth_dev_ops txgbe_eth_dev_ops;
/* Return whether the detected PHY is an SFP module type.
 * NOTE(review): the return statements and default case of this switch
 * are not visible in this chunk — confirm against the full source.
 */
62 txgbe_is_sfp(struct txgbe_hw *hw)
64 switch (hw->phy.type) {
65 case txgbe_phy_sfp_avago:
66 case txgbe_phy_sfp_ftl:
67 case txgbe_phy_sfp_intel:
68 case txgbe_phy_sfp_unknown:
69 case txgbe_phy_sfp_tyco_passive:
70 case txgbe_phy_sfp_unknown_passive:
/* Re-arm device interrupts: program the misc interrupt enable with the
 * software-maintained mask, then clear both queue interrupt mask
 * registers so all queue vectors can fire.
 */
78 txgbe_enable_intr(struct rte_eth_dev *dev)
80 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
81 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
83 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
84 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
85 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
/* Mask all device interrupts: drop every misc interrupt enable bit and
 * set both queue interrupt mask-set registers (IMS masks, IMC unmasks).
 */
90 txgbe_disable_intr(struct txgbe_hw *hw)
92 PMD_INIT_FUNC_TRACE();
94 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
95 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
96 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
/* Per-port init callback invoked by rte_eth_dev_create() from probe:
 * wires up the ops table, brings up the base (shared) code, validates
 * the EEPROM, initializes the MAC, allocates MAC address tables, and
 * registers/enables the misc interrupt handler.
 * NOTE(review): many error-path lines (returns, closing braces) are not
 * visible in this extracted chunk.
 */
101 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
103 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
104 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
105 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
106 const struct rte_memzone *mz;
110 PMD_INIT_FUNC_TRACE();
112 eth_dev->dev_ops = &txgbe_eth_dev_ops;
114 rte_eth_copy_pci_info(eth_dev, pci_dev);
116 /* Vendor and Device ID need to be set before init of shared code */
117 hw->device_id = pci_dev->id.device_id;
118 hw->vendor_id = pci_dev->id.vendor_id;
119 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
120 hw->allow_unsupported_sfp = 1;
122 /* Reserve memory for interrupt status block */
123 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
124 16, TXGBE_ALIGN, SOCKET_ID_ANY);
/* Keep both the IOVA (for the NIC) and the virtual address (for the
 * driver) of the interrupt status block.
 */
128 hw->isb_dma = TMZ_PADDR(mz);
129 hw->isb_mem = TMZ_VADDR(mz);
131 /* Initialize the shared code (base driver) */
132 err = txgbe_init_shared_code(hw);
134 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
138 err = hw->rom.init_params(hw);
140 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
144 /* Make sure we have a good EEPROM before we read from it */
145 err = hw->rom.validate_checksum(hw, &csum);
147 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
151 err = hw->mac.init_hw(hw);
154 * Devices with copper phys will fail to initialise if txgbe_init_hw()
155 * is called too soon after the kernel driver unbinding/binding occurs.
156 * The failure occurs in txgbe_identify_phy() for all devices,
157 * but for non-copper devices, txgbe_identify_sfp_module() is
158 * also called. See txgbe_identify_phy(). The reason for the
159 * failure is not known, and only occurs when virtualisation features
160 * are disabled in the bios. A delay of 200ms was found to be enough by
161 * trial-and-error, and is doubled to be safe.
163 if (err && hw->phy.media_type == txgbe_media_type_copper) {
/* Retry once after the delay described above. */
165 err = hw->mac.init_hw(hw);
168 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
171 if (err == TXGBE_ERR_EEPROM_VERSION) {
172 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
173 "LOM. Please be aware there may be issues associated "
174 "with your hardware.");
175 PMD_INIT_LOG(ERR, "If you are experiencing problems "
176 "please contact your hardware representative "
177 "who provided you with this hardware.");
178 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
179 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
182 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
186 /* disable interrupt */
187 txgbe_disable_intr(hw);
189 /* Allocate memory for storing MAC addresses */
190 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
191 hw->mac.num_rar_entries, 0);
192 if (eth_dev->data->mac_addrs == NULL) {
194 "Failed to allocate %u bytes needed to store "
196 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
200 /* Copy the permanent MAC address */
201 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
/* NOTE(review): "ð_dev" below looks like a mojibake of "&eth_dev"
 * (HTML entity corruption) — fix the file encoding.
 */
202 ð_dev->data->mac_addrs[0]);
204 /* Allocate memory for storing hash filter MAC addresses */
205 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
206 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
207 if (eth_dev->data->hash_mac_addrs == NULL) {
209 "Failed to allocate %d bytes needed to store MAC addresses",
210 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
/* Log how the port was identified (SFP ports get the module type). */
214 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
215 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
216 (int)hw->mac.type, (int)hw->phy.type,
217 (int)hw->phy.sfp_type);
219 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
220 (int)hw->mac.type, (int)hw->phy.type);
222 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
223 eth_dev->data->port_id, pci_dev->id.vendor_id,
224 pci_dev->id.device_id);
/* Route the device's misc interrupt to our handler. */
226 rte_intr_callback_register(intr_handle,
227 txgbe_dev_interrupt_handler, eth_dev);
229 /* enable uio/vfio intr/eventfd mapping */
230 rte_intr_enable(intr_handle);
232 /* enable support intr */
233 txgbe_enable_intr(eth_dev);
/* Per-port uninit callback: only the primary process tears the port
 * down; all cleanup is delegated to txgbe_dev_close().
 */
239 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
241 PMD_INIT_FUNC_TRACE();
243 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
246 txgbe_dev_close(eth_dev);
/* PCI probe entry point: parse optional devargs, then create the PF
 * ethdev backed by a txgbe_adapter private area and run
 * eth_txgbe_dev_init() on it.
 */
252 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
253 struct rte_pci_device *pci_dev)
255 struct rte_eth_dev *pf_ethdev;
256 struct rte_eth_devargs eth_da;
259 if (pci_dev->device.devargs) {
260 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
/* No devargs: leave the parsed-args struct zeroed.
 * NOTE(review): "ð_da" below looks like a mojibake of "&eth_da" —
 * fix the file encoding.
 */
265 memset(ð_da, 0, sizeof(eth_da));
268 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
269 sizeof(struct txgbe_adapter),
270 eth_dev_pci_specific_init, pci_dev,
271 eth_txgbe_dev_init, NULL);
273 if (retval || eth_da.nb_representor_ports < 1)
276 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
277 if (pf_ethdev == NULL)
/* PCI remove entry point: look up the ethdev by PCI device name and
 * destroy it through the uninit callback.
 */
283 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
285 struct rte_eth_dev *ethdev;
287 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
291 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
/* PCI driver descriptor: needs BAR mapping and supports link-state-
 * change interrupts.
 */
294 static struct rte_pci_driver rte_txgbe_pmd = {
295 .id_table = pci_id_txgbe_map,
296 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
297 RTE_PCI_DRV_INTR_LSC,
298 .probe = eth_txgbe_pci_probe,
299 .remove = eth_txgbe_pci_remove,
/* Enable the GPIO pin used for PHY/SFP events and add the GPIO cause
 * to the software misc interrupt mask.
 */
304 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
306 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
307 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
/* Read-modify-write: preserve other GPIO interrupt enables. */
310 gpie = rd32(hw, TXGBE_GPIOINTEN);
311 gpie |= TXGBE_GPIOBIT_6;
312 wr32(hw, TXGBE_GPIOINTEN, gpie);
313 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
317 * Reset and stop device.
/* Disable interrupts, retry unregistering the interrupt callback
 * (-EAGAIN means a handler is still running), cancel the pending
 * delayed-interrupt alarm, then release the MAC address tables.
 * NOTE(review): the do/while loop header and delay between retries are
 * not visible in this chunk.
 */
320 txgbe_dev_close(struct rte_eth_dev *dev)
322 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
323 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
327 PMD_INIT_FUNC_TRACE();
329 /* disable uio intr before callback unregister */
330 rte_intr_disable(intr_handle);
333 ret = rte_intr_callback_unregister(intr_handle,
334 txgbe_dev_interrupt_handler, dev);
335 if (ret >= 0 || ret == -ENOENT) {
337 } else if (ret != -EAGAIN) {
339 "intr callback unregister failed: %d",
343 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
345 /* cancel the delay handler before remove dev */
346 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
/* free(NULL) is a no-op, so these are safe even if init failed early */
348 rte_free(dev->data->mac_addrs);
349 dev->data->mac_addrs = NULL;
351 rte_free(dev->data->hash_mac_addrs);
352 dev->data->hash_mac_addrs = NULL;
/* dev_infos_get callback: report queue/MAC/VMDq limits, offload
 * capabilities, default ring thresholds, RSS parameters, supported link
 * speeds, and driver-preferred Rx/Tx defaults.
 */
358 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
360 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
361 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
363 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
364 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
365 dev_info->min_rx_bufsize = 1024;
366 dev_info->max_rx_pktlen = 15872;
367 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
368 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
369 dev_info->max_vfs = pci_dev->max_vfs;
370 dev_info->max_vmdq_pools = ETH_64_POOLS;
371 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
/* Port-level Rx capa is a superset of the per-queue capa. */
372 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
373 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
374 dev_info->rx_queue_offload_capa);
375 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
376 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
/* Defaults applied when the application passes a NULL rxconf. */
378 dev_info->default_rxconf = (struct rte_eth_rxconf) {
380 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
381 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
382 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
384 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
/* Defaults applied when the application passes a NULL txconf. */
389 dev_info->default_txconf = (struct rte_eth_txconf) {
391 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
392 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
393 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
395 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
399 dev_info->rx_desc_lim = rx_desc_lim;
400 dev_info->tx_desc_lim = tx_desc_lim;
402 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
403 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
404 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
406 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
407 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
409 /* Driver-preferred Rx/Tx parameters */
410 dev_info->default_rxportconf.burst_size = 32;
411 dev_info->default_txportconf.burst_size = 32;
412 dev_info->default_rxportconf.nb_queues = 1;
413 dev_info->default_txportconf.nb_queues = 1;
414 dev_info->default_rxportconf.ring_size = 256;
415 dev_info->default_txportconf.ring_size = 256;
/* link_update callback.
 * NOTE(review): only the signature and the wait_to_complete suppression
 * are visible here — the body is missing from this chunk.
 */
421 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
424 RTE_SET_USED(wait_to_complete);
429 * It clears the interrupt causes and enables the interrupt.
430 * It will be called once only during nic initialized.
/* @param dev */
433 * Pointer to struct rte_eth_dev.
/* @param on: non-zero enables the LSC cause, zero disables it
 * (the on/off branch keywords are not visible in this chunk).
 * @return
 */
438 * - On success, zero.
439 * - On failure, a negative value.
442 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
444 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
/* Print current state so enabling LSC has a known baseline in the log. */
446 txgbe_dev_link_status_print(dev);
448 intr->mask_misc |= TXGBE_ICRMISC_LSC;
450 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
456 * It clears the interrupt causes and enables the interrupt.
457 * It will be called once only during nic initialized.
/* @param dev */
460 * Pointer to struct rte_eth_dev.
/* @return */
463 * - On success, zero.
464 * - On failure, a negative value.
467 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
469 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
/* Enable all Rx-queue interrupt causes in both halves of the mask. */
471 intr->mask[0] |= TXGBE_ICR_MASK;
472 intr->mask[1] |= TXGBE_ICR_MASK;
478 * It clears the interrupt causes and enables the interrupt.
479 * It will be called once only during nic initialized.
/* @param dev */
482 * Pointer to struct rte_eth_dev.
/* @return */
485 * - On success, zero.
486 * - On failure, a negative value.
489 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
491 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
/* Add the link-security (MACsec) cause to the misc interrupt mask. */
493 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
499 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
/* @param dev */
502 * Pointer to struct rte_eth_dev.
/* @return */
505 * - On success, zero.
506 * - On failure, a negative value.
509 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
512 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
513 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
515 /* clear all cause mask */
516 txgbe_disable_intr(hw);
518 /* read-on-clear nic registers here */
/* Causes are DMA'd by the NIC into the interrupt status block. */
519 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
520 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
524 /* set flag for async link update */
525 if (eicr & TXGBE_ICRMISC_LSC)
526 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
528 if (eicr & TXGBE_ICRMISC_VFMBX)
529 intr->flags |= TXGBE_FLAG_MAILBOX;
531 if (eicr & TXGBE_ICRMISC_LNKSEC)
532 intr->flags |= TXGBE_FLAG_MACSEC;
534 if (eicr & TXGBE_ICRMISC_GPIO)
535 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
541 * It gets and then prints the link status.
/* @param dev */
544 * Pointer to struct rte_eth_dev.
/* @return */
547 * - On success, zero.
548 * - On failure, a negative value.
551 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
553 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
554 struct rte_eth_link link;
/* Snapshot the cached link state (no hardware access here). */
556 rte_eth_linkstatus_get(dev, &link);
558 if (link.link_status) {
559 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
560 (int)(dev->data->port_id),
561 (unsigned int)link.link_speed,
562 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
563 "full-duplex" : "half-duplex");
565 PMD_INIT_LOG(INFO, " Port %d: Link Down",
566 (int)(dev->data->port_id));
568 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
569 pci_dev->addr.domain,
572 pci_dev->addr.function);
576 * It executes link_update after knowing an interrupt occurred.
/* @param dev */
579 * Pointer to struct rte_eth_dev.
/* @return */
582 * - On success, zero.
583 * - On failure, a negative value.
586 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
587 struct rte_intr_handle *intr_handle)
589 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
591 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
593 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
/* Mailbox events are acknowledged only; PF/VF handling lives elsewhere. */
595 if (intr->flags & TXGBE_FLAG_MAILBOX)
596 intr->flags &= ~TXGBE_FLAG_MAILBOX;
598 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
599 hw->phy.handle_lasi(hw);
600 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
603 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
604 struct rte_eth_link link;
606 /*get the link status before link update, for predicting later*/
607 rte_eth_linkstatus_get(dev, &link);
609 txgbe_dev_link_update(dev, 0);
/* Pick the settle delay from the pre-update state: link coming up
 * settles faster than link going down.
 */
612 if (!link.link_status)
613 /* handle it 1 sec later, wait it being stable */
614 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
617 /* handle it 4 sec later, wait it being stable */
618 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
620 txgbe_dev_link_status_print(dev);
621 if (rte_eal_alarm_set(timeout * 1000,
622 txgbe_dev_interrupt_delayed_handler,
624 PMD_DRV_LOG(ERR, "Error setting alarm");
626 /* remember original mask */
627 intr->mask_misc_orig = intr->mask_misc;
628 /* only disable lsc interrupt */
/* LSC stays masked until the delayed handler restores the mask. */
629 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
633 PMD_DRV_LOG(DEBUG, "enable intr immediately");
634 txgbe_enable_intr(dev);
635 rte_intr_enable(intr_handle);
641 * Interrupt handler which shall be registered for alarm callback for delayed
642 * handling specific interrupt to wait for the stable nic state. As the
643 * NIC interrupt state is not stable for txgbe after link is just down,
644 * it needs to wait 4 seconds to get the stable status.
/* @param param */
647 * Pointer to interrupt handle.
649 * The address of parameter (struct rte_eth_dev *) registered before.
655 txgbe_dev_interrupt_delayed_handler(void *param)
657 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
658 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
659 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
660 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
661 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
/* Mask interrupts while we re-read the causes from the status block. */
664 txgbe_disable_intr(hw);
666 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
668 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
669 hw->phy.handle_lasi(hw);
670 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
/* Link state should be stable now; refresh and notify applications. */
673 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
674 txgbe_dev_link_update(dev, 0);
675 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
676 txgbe_dev_link_status_print(dev);
677 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
681 if (intr->flags & TXGBE_FLAG_MACSEC) {
682 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
684 intr->flags &= ~TXGBE_FLAG_MACSEC;
687 /* restore original mask */
688 intr->mask_misc = intr->mask_misc_orig;
689 intr->mask_misc_orig = 0;
691 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
692 txgbe_enable_intr(dev);
693 rte_intr_enable(intr_handle);
697 * Interrupt handler triggered by NIC for handling
698 * specific interrupt.
/* @param param */
701 * Pointer to interrupt handle.
703 * The address of parameter (struct rte_eth_dev *) registered before.
/* Top-level ISR: latch causes into intr->flags, then act on them. */
709 txgbe_dev_interrupt_handler(void *param)
711 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
713 txgbe_dev_interrupt_get_status(dev);
714 txgbe_dev_interrupt_action(dev, dev->intr_handle);
718 * set the IVAR registers, mapping interrupt causes to vectors
/* @param hw */
720 * pointer to txgbe_hw struct
/* @param direction */
722 * 0 for Rx, 1 for Tx, -1 for other causes
/* @param queue */
724 * queue to map the corresponding interrupt to
/* @param msix_vector */
726 * the vector to map to the corresponding queue
729 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
730 uint8_t queue, uint8_t msix_vector)
/* direction == -1 programs the misc-cause IVAR instead of a queue slot. */
734 if (direction == -1) {
736 msix_vector |= TXGBE_IVARMISC_VLD;
738 tmp = rd32(hw, TXGBE_IVARMISC);
739 tmp &= ~(0xFF << idx);
740 tmp |= (msix_vector << idx);
741 wr32(hw, TXGBE_IVARMISC, tmp);
743 /* rx or tx causes */
744 /* Workaround for ICR lost */
/* Each IVAR register holds 4 byte-wide entries: 2 queues x {Rx,Tx}. */
745 idx = ((16 * (queue & 1)) + (8 * direction));
746 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
747 tmp &= ~(0xFF << idx);
748 tmp |= (msix_vector << idx);
749 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
754 * Sets up the hardware to properly generate MSI-X interrupts
/* @param dev */
756 * board private structure
759 txgbe_configure_msix(struct rte_eth_dev *dev)
761 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
762 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
763 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
764 uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
765 uint32_t vec = TXGBE_MISC_VEC_ID;
768 /* won't configure msix register if no mapping is done
769 * between intr vector and event fd
770 * but if misx has been enabled already, need to configure
771 * auto clean, auto mask and throttling.
773 gpie = rd32(hw, TXGBE_GPIE);
774 if (!rte_intr_dp_is_en(intr_handle) &&
775 !(gpie & TXGBE_GPIE_MSIX))
/* With extra vectors available, queue vectors start after the misc one. */
778 if (rte_intr_allow_others(intr_handle)) {
779 base = TXGBE_RX_VEC_START;
783 /* setup GPIE for MSI-x mode */
784 gpie = rd32(hw, TXGBE_GPIE);
785 gpie |= TXGBE_GPIE_MSIX;
786 wr32(hw, TXGBE_GPIE, gpie);
788 /* Populate the IVAR table and set the ITR values to the
789 * corresponding register.
791 if (rte_intr_dp_is_en(intr_handle)) {
792 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
794 /* by default, 1:1 mapping */
795 txgbe_set_ivar_map(hw, 0, queue_id, vec);
796 intr_handle->intr_vec[queue_id] = vec;
/* Reuse the last event fd once all dedicated vectors are consumed. */
797 if (vec < base + intr_handle->nb_efd - 1)
/* Map the misc causes (queue index 1, direction -1) to the misc vector. */
801 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
/* Default interrupt throttling for the misc vector. */
803 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
804 TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
/* ethdev ops table wired up in eth_txgbe_dev_init().
 * NOTE(review): only dev_infos_get is visible in this chunk.
 */
808 static const struct eth_dev_ops txgbe_eth_dev_ops = {
809 .dev_infos_get = txgbe_dev_info_get,
/* Driver registration: PMD name, PCI ID table, and kernel-module
 * dependencies, followed by per-component log type registration.
 * NOTE(review): the matching #endif lines for the debug log blocks are
 * not visible in this chunk.
 */
812 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
813 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
814 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
816 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
817 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
819 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
820 RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
822 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
823 RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
826 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
827 RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);