/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */
#include <rte_common.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;
static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return 1;
	default:
		return 0;
	}
}
static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
}
static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
}
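/* Note on the register pair used by the two helpers above: judging by the
 * names, IMS (Interrupt Mask Set) sets mask bits and thereby disables the
 * corresponding vectors, while IMC (Interrupt Mask Clear) clears them and
 * re-enables delivery. That is why txgbe_enable_intr() writes the IMC
 * registers and txgbe_disable_intr() writes IMS with the same
 * TXGBE_IMC_MASK bit pattern.
 */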
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint16_t csum;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
				      16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Initialize the EEPROM parameters */
	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	/*
	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the BIOS. A delay of 200ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		rte_delay_ms(200);
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;
	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	}
	if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}
	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	return 0;
}
static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}
static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct txgbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	return 0;
}
static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}
static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};
static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	return 0;
}
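/* Queue partitioning sketch (assuming TXGBE_MAX_RX_QUEUE_NUM is 128, which
 * the 64/32 pool split above implies): 1 or 2 RSS queues select
 * ETH_64_POOLS and give 128 / 64 = 2 queues per pool; 4 RSS queues select
 * ETH_32_POOLS and give 128 / 32 = 4. The PF default queues then start
 * right after the VF range, at max_vfs * nb_q_per_pool.
 */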
static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" values are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if no mq mode is configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if (nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool ||
		    nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					" not supported.");
			return -EINVAL;
		}

		/* check configuration for vmdq+dcb mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}
	}

	return 0;
}
static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}
static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	uint32_t gpie;

	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}
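/* The GPIO bit raised above presumably corresponds to the SFP module
 * interrupt line on these boards; once it is enabled in GPIOINTEN and
 * TXGBE_ICRMISC_GPIO is added to the misc mask, module plug/unplug and PHY
 * events surface through txgbe_dev_interrupt_handler() as
 * TXGBE_FLAG_PHY_INTERRUPT.
 */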
/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}
/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}
/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	txgbe_dev_free_queues(dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				txgbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		/* -EAGAIN means the callback is still running; retry */
		rte_delay_ms(100);
	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));

	/* cancel the delayed handler before removing the device */
	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return 0;
}
static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}
const uint32_t *
txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
		return txgbe_get_supported_ptypes();

	return NULL;
}
void
txgbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		hw->mac.get_link_capabilities(hw, &speed, &autoneg);

	hw->mac.setup_link(hw, speed, true);

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}
/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_link link;
	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	bool link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	hw->mac.get_link_status = true;

	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);

	if (err != 0) {
		link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		if (hw->phy.media_type == txgbe_media_type_fiber) {
			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				txgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbe_dev_link_status_print(dev);
	if (on)
		intr->mask_misc |= TXGBE_ICRMISC_LSC;
	else
		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

	return 0;
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask[0] |= TXGBE_ICR_MASK;
	intr->mask[1] |= TXGBE_ICR_MASK;

	return 0;
}
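/* A note on scope: the LSC and MACSEC helpers toggle single bits in
 * mask_misc, while this one sets every bit of both queue-vector mask words
 * (TXGBE_ICR_MASK appears to span the full 32-bit ICR word), so all queue
 * interrupts are enabled wholesale rather than per queue.
 */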
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

	return 0;
}
/*
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX)
		intr->flags &= ~TXGBE_FLAG_MAILBOX;

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* likely to up */
		if (!link.link_status)
			/* handle it 1 sec later, wait for it to be stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait for it to be stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}
/*
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable NIC state. As the
 * NIC interrupt state is not stable for txgbe after link is just down,
 * it needs to wait 4 seconds to get the stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t eicr;

	txgbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		txgbe_dev_link_update(dev, 0);
		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
		txgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & TXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~TXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}
/*
 * Interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbe_dev_interrupt_get_status(dev);
	txgbe_dev_interrupt_action(dev, dev->intr_handle);
}
static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t enable_addr = 1;

	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	txgbe_clear_rar(hw, index);
}
static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	txgbe_remove_rar(dev, 0);
	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}
static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
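/* Worked example for mc_filter_type 0, purely illustrative: for the
 * address 00:11:22:33:44:55, addr_bytes[4] is 0x44 and addr_bytes[5] is
 * 0x55, so vector = (0x44 >> 4) | (0x55 << 4) = 0x554. The 12-bit result
 * selects one of 4096 hash bits in the unicast table.
 */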
static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	/* bits [11:5] of the vector pick one of 128 UTA registers,
	 * bits [4:0] pick the bit within that register
	 */
	vector = txgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, TXGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (on)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
static void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}
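/* IVAR layout, as implied by the index arithmetic above: each 32-bit
 * TXGBE_IVAR(n) register holds four 8-bit vector entries for the queue
 * pair 2n/2n+1: bits 7:0 Rx of the even queue, 15:8 its Tx, 23:16 Rx of
 * the odd queue, 31:24 its Tx. idx = 16 * (queue & 1) + 8 * direction
 * picks the matching byte lane.
 */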
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 * but if msix has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT));
}
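/* The final ITR write above programs interrupt throttling for the misc
 * vector: TXGBE_ITR_IVAL_10G() scales the default interval for 10G
 * operation, so misc causes are rate limited instead of firing back to
 * back. Queue vectors inherit their mapping from the 1:1 IVAR loop.
 */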
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct txgbe_hw *hw;
	u8 *mc_addr_list;

	hw = TXGBE_DEV_HW(dev);
	mc_addr_list = (u8 *)mc_addr_set;
	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 txgbe_dev_addr_list_itr, TRUE);
}
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure = txgbe_dev_configure,
	.dev_infos_get = txgbe_dev_info_get,
	.dev_set_link_up = txgbe_dev_set_link_up,
	.dev_set_link_down = txgbe_dev_set_link_down,
	.dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
	.rx_queue_start = txgbe_dev_rx_queue_start,
	.rx_queue_stop = txgbe_dev_rx_queue_stop,
	.tx_queue_start = txgbe_dev_tx_queue_start,
	.tx_queue_stop = txgbe_dev_tx_queue_stop,
	.rx_queue_setup = txgbe_dev_rx_queue_setup,
	.rx_queue_release = txgbe_dev_rx_queue_release,
	.tx_queue_setup = txgbe_dev_tx_queue_setup,
	.tx_queue_release = txgbe_dev_tx_queue_release,
	.mac_addr_add = txgbe_add_rar,
	.mac_addr_remove = txgbe_remove_rar,
	.mac_addr_set = txgbe_set_default_mac_addr,
	.uc_hash_table_set = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
	.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
};
RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif