/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <rte_common.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"

static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

/* The set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

static bool
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return true;
	default:
		return false;
	}
}

static void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
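
	/* enable the misc causes tracked in intr->mask_misc and unmask
	 * all queue interrupt vectors
	 */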
	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();
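
	/* mask every misc cause and set the mask bit on all queue vectors */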
	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
}

static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	u16 csum;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
				      16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;
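
	/* the device DMAs its interrupt status into this block; the interrupt
	 * handlers read causes from isb_mem instead of polling registers
	 */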
	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the BIOS. A delay of 200ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		rte_delay_ms(200);
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
		return -EIO;
	} else if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
		RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					       &eth_da);
		if (retval)
			return retval;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
				    sizeof(struct txgbe_adapter),
				    eth_dev_pci_specific_init, pci_dev,
				    eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	return 0;
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};

static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
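	/* queues above those reserved for the VFs form the PF's default pool */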
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	return 0;
}

static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
				     " unsupported mq_mode rx %d.",
				     dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						     " invalid queue number"
						     " for VMDQ RSS, allowed"
						     " values are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if no mq mode is configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
				     " wrong mq_mode rx %d.",
				     dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
				     " nb_rx_q=%d nb_tx_q=%d queue number"
				     " must be less than or equal to %d.",
				     nb_rx_q, nb_tx_q,
				     RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
				     " not supported.");
			return -EINVAL;
		}
		/* check configuration for VMDQ+DCB mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
					     TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
					     " nb_queue_pools must be %d or %d.",
					     ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
					     TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
					     " nb_queue_pools != %d and"
					     " nb_queue_pools != %d.",
					     ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
					     " and nb_tcs != %d.",
					     ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
					     " and nb_tcs != %d.",
					     ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}
	}
	return 0;
}

static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	uint32_t gpie;
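
	/* PHY/SFP events arrive as a GPIO interrupt; enable the GPIO pin
	 * and track the cause in the misc interrupt mask
	 */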
	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}

/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	txgbe_dev_free_queues(dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				txgbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				     "intr callback unregister failed: %d",
				     ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));

	/* cancel the delay handler before removing the device */
	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return 0;
}

static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

static void
txgbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		hw->mac.get_link_capabilities(hw, &speed, &autoneg);

	hw->mac.setup_link(hw, speed, true);

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}

/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_link link;
	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	bool link_up;
	int wait = 1;
	int err;

	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;
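
	/* force the next check_link() to read the status from hardware */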
	hw->mac.get_link_status = true;

	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* don't wait when the caller didn't ask to, or when the LSC
	 * interrupt is enabled
	 */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		if (hw->phy.media_type == txgbe_media_type_fiber) {
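			/* defer fiber link setup to an alarm handler so this
			 * poll path does not block
			 */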
			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				txgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbe_dev_link_status_print(dev);
	if (on)
		intr->mask_misc |= TXGBE_ICRMISC_LSC;
	else
		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
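
	/* unmask all Rx/Tx queue causes in both 32-bit mask words */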
	intr->mask[0] |= TXGBE_ICR_MASK;
	intr->mask[1] |= TXGBE_ICR_MASK;

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

	return 0;
}

/**
 * It reads ICR and sets the flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			     (int)(dev->data->port_id),
			     (unsigned int)link.link_speed,
			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			     (int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);
}

/**
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX)
		intr->flags &= ~TXGBE_FLAG_MAILBOX;

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* likely to up */
		if (!link.link_status)
			/* handle it 1 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}

/**
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, to wait for the NIC state to
 * become stable. The txgbe NIC interrupt state is not stable just after
 * the link goes down, so it waits 4 seconds to get a stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t eicr;

	txgbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		txgbe_dev_link_update(dev, 0);
		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
		txgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & TXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~TXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}

/**
 * Interrupt handler triggered by NIC for handling a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbe_dev_interrupt_get_status(dev);
	txgbe_dev_interrupt_action(dev, dev->intr_handle);
}

static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
	      uint32_t index, uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t enable_addr = 1;
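
	/* enable_addr marks the RAR entry valid; pool binds the address
	 * to a VMDq pool
	 */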
	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	txgbe_remove_rar(dev, 0);
	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}

static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			  (((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			  (((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			  (((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			  (((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12 bits or the boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on PF hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	vector = txgbe_uta_vector(hw, mac_addr);
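	/* the 12-bit vector selects one bit in the 128-entry UTA: bits [11:5]
	 * pick the 32-bit register, bits [4:0] the bit within it
	 */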
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}

static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	/* The UTA table only exists on PF hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, TXGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (on)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}

/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
static void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for lost ICR */
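		/* each 32-bit IVAR register holds four 8-bit entries: the Rx
		 * and Tx causes for an even/odd queue pair
		 */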
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}

/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* Won't configure the MSI-X register if no mapping is done
	 * between intr vector and event fd,
	 * but if MSI-X has been enabled already, we need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
		     queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
	     TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
	     | TXGBE_ITR_WRDSA);
}
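
/* iterator for txgbe_update_mc_addr_list(): return the current address and
 * advance to the next packed rte_ether_addr
 */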
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

static int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct txgbe_hw *hw;
	u8 *mc_addr_list;

	hw = TXGBE_DEV_HW(dev);
	mc_addr_list = (u8 *)mc_addr_set;
	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 txgbe_dev_addr_list_itr, TRUE);
}

static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure              = txgbe_dev_configure,
	.dev_infos_get              = txgbe_dev_info_get,
	.dev_set_link_up            = txgbe_dev_set_link_up,
	.dev_set_link_down          = txgbe_dev_set_link_down,
	.rx_queue_start             = txgbe_dev_rx_queue_start,
	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
	.tx_queue_start             = txgbe_dev_tx_queue_start,
	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
	.rx_queue_release           = txgbe_dev_rx_queue_release,
	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
	.tx_queue_release           = txgbe_dev_tx_queue_release,
	.mac_addr_add               = txgbe_add_rar,
	.mac_addr_remove            = txgbe_remove_rar,
	.mac_addr_set               = txgbe_set_default_mac_addr,
	.uc_hash_table_set          = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif