/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <rte_common.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"

static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

static inline bool
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return true;
	default:
		return false;
	}
}

static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);

	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
}
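
/* A note on the two mask registers used above (an inference from the
 * register names and usage in this file, not from a datasheet):
 * TXGBE_IMC(n) reads as "interrupt mask clear" (writing 1s unmasks
 * vectors) while TXGBE_IMS(n) reads as "interrupt mask set" (writing 1s
 * masks them), which is why enable writes IMC and disable writes IMS
 * with the same TXGBE_IMC_MASK bit pattern.
 */
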
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint16_t csum;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not have been initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
				      16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the bios. A delay of 200ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		rte_delay_ms(400); /* 200ms doubled, per the note above */
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
		return -EIO;
	} else if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	txgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					       &eth_da);
		if (retval)
			return retval;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
				    sizeof(struct txgbe_adapter),
				    eth_dev_pci_specific_init, pci_dev,
				    eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	return 0;
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};

static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	return 0;
}
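
/* A worked example of the partitioning above (illustrative, assuming
 * TXGBE_MAX_RX_QUEUE_NUM is 128): with nb_rx_q = 4 the device is split
 * into 32 pools of 128/32 = 4 queues each; with 7 VFs enabled, the PF's
 * default pool queues then start at index 7 * 4 = 28.
 */
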
static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" values are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if no mq mode is configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					" not supported.");
			return -EINVAL;
		}

		/* check configuration for vmdq+dcb mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}
	}
	return 0;
}

static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue fails to meet the bulk
	 * allocation preconditions, we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	uint32_t gpie;

	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = 0;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* TXGBE devices don't support:
	 *    - half duplex (checked afterwards for valid speeds)
	 *    - fixed speed: TODO implement
	 */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fixed speed not supported",
			dev->data->port_id);
		return -EINVAL;
	}

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	txgbe_dev_stop(dev);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = txgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	txgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for sleep until rx interrupt */
	txgbe_configure_msix(dev);

	/* initialize transmission unit */
	txgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	err = txgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			 ETH_LINK_SPEED_10G;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = (TXGBE_LINK_SPEED_100M_FULL |
			 TXGBE_LINK_SPEED_1GB_FULL |
			 TXGBE_LINK_SPEED_10GB_FULL);
	} else {
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= TXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= TXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= TXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= TXGBE_LINK_SPEED_100M_FULL;
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
		txgbe_dev_macsec_interrupt_setup(dev);
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     txgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		txgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	txgbe_enable_intr(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	txgbe_dev_link_update(dev, 0);

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	txgbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
txgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	txgbe_disable_intr(hw);

	/* reset the NIC */
	txgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
	}

	txgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   txgbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	txgbe_pf_reset_hw(hw);

	ret = txgbe_dev_stop(dev);

	txgbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				txgbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));

	/* cancel the delayed handler before removing the dev */
	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

static int
txgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset PF port, it should notify all
	 * its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to txgbe PF, it is rather complex.
	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_txgbe_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_txgbe_dev_init(dev, NULL);

	return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
{ \
	uint32_t current_counter = rd32(hw, reg); \
	if (current_counter < last_counter) \
		current_counter += 0x100000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFLL; \
}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{ \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
	uint64_t current_counter_msb = rd32(hw, reg_msb); \
	uint64_t current_counter = (current_counter_msb << 32) | \
		current_counter_lsb; \
	if (current_counter < last_counter) \
		current_counter += 0x1000000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFFLL; \
}
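
/* Worked example of the wrap handling above (illustrative): the 36-bit
 * byte counters wrap at 2^36. If last_counter was 0xFFFFFFFF0 and the
 * register pair now reads 0x10, current_counter is bumped by 2^36 =
 * 0x1000000000 before subtracting, so the delta comes out as 0x20 bytes
 * instead of a huge value truncated to 36 bits.
 */
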
void
txgbe_read_stats_registers(struct txgbe_hw *hw,
			   struct txgbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP (queue pair) stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
					hw->qp_last[i].rx_qp_packets,
					hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
					hw->qp_last[i].rx_qp_bytes,
					hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
					hw->qp_last[i].rx_qp_mc_packets,
					hw_stats->qp[i].rx_qp_mc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
					hw->qp_last[i].tx_qp_packets,
					hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
					hw->qp_last[i].tx_qp_bytes,
					hw_stats->qp[i].tx_qp_bytes);
	}

	/* Packet buffer (per user priority) stats */
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->up[i].rx_up_xon_packets +=
				rd32(hw, TXGBE_PBRXUPXON(i));
		hw_stats->up[i].rx_up_xoff_packets +=
				rd32(hw, TXGBE_PBRXUPXOFF(i));
		hw_stats->up[i].tx_up_xon_packets +=
				rd32(hw, TXGBE_PBTXUPXON(i));
		hw_stats->up[i].tx_up_xoff_packets +=
				rd32(hw, TXGBE_PBTXUPXOFF(i));
		hw_stats->up[i].tx_up_xon2off_packets +=
				rd32(hw, TXGBE_PBTXUPOFF(i));
		hw_stats->up[i].rx_up_dropped +=
				rd32(hw, TXGBE_PBRXMISS(i));
	}
	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);

	/* DMA stats */
	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);

	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);

	/* MAC stats */
	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);

	/* Management stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);

	/* FCoE stats */
	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);

	/* Flow Director Stats */
	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
	hw_stats->flow_director_added_filters +=
			TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_removed_filters +=
			TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_filter_add_errors +=
			TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
	hw_stats->flow_director_filter_remove_errors +=
			TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));

	/* MACsec stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, TXGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, TXGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, TXGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, TXGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, TXGBE_LSECRX_BADPKT(i));
	}
	hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, TXGBE_LSECRX_INVSAPKT);
	hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, TXGBE_LSECRX_BADSAPKT);

	hw_stats->rx_total_missed_packets = 0;
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->rx_total_missed_packets +=
			hw_stats->up[i].rx_up_dropped;
	}
}

static int
txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_stat_mappings *stat_mappings =
			TXGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	txgbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
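
	/* Each 32-bit rqsm/tqsm register packs NB_QMAP_FIELDS_PER_QSM_REG
	 * 8-bit fields, one per queue, giving the stats-counter index that
	 * queue maps to; indexes beyond RTE_ETHDEV_QUEUE_STAT_CNTRS are
	 * folded back into range with a modulo (a reading of the shift
	 * arithmetic below, not of the datasheet).
	 */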
	for (i = 0; i < TXGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed = hw_stats->rx_total_missed_packets;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rx_oversize_errors +
			 hw_stats->rx_drop_packets +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors +
			 hw_stats->rx_fcoe_crc_errors +
			 hw_stats->rx_fcoe_mbuf_allocation_errors;

	/* Tx Errors */
	stats->oerrors = 0;

	return 0;
}

static int
txgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	txgbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

static const uint32_t *
txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
		return txgbe_get_supported_ptypes();

	return NULL;
}

void
txgbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		hw->mac.get_link_capabilities(hw, &speed, &autoneg);

	hw->mac.setup_link(hw, speed, true);

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}

/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_link link;
	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	bool link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	hw->mac.get_link_status = true;

	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);

	if (err != 0) {
		link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		if (hw->phy.media_type == txgbe_media_type_fiber) {
			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				txgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}

/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbe_dev_link_status_print(dev);
	if (on)
		intr->mask_misc |= TXGBE_ICRMISC_LSC;
	else
		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

	return 0;
}

/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask[0] |= TXGBE_ICR_MASK;
	intr->mask[1] |= TXGBE_ICR_MASK;

	return 0;
}

/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

	return 0;
}

/*
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}

/*
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			     (int)(dev->data->port_id),
			     (unsigned int)link.link_speed,
			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			     (int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);
}

/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX)
		intr->flags &= ~TXGBE_FLAG_MAILBOX;

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* likely to come up */
		if (!link.link_status)
			/* handle it 1 sec later, wait for it to be stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to go down */
		else
			/* handle it 4 sec later, wait for it to be stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}

/**
 * Interrupt handler which shall be registered as alarm callback for delayed
 * handling of a specific interrupt, to wait for the NIC state to become
 * stable. As the NIC interrupt state is not stable for txgbe right after
 * the link goes down, it needs to wait 4 seconds to get a stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t eicr;

	txgbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		txgbe_dev_link_update(dev, 0);
		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
		txgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & TXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~TXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbe_dev_interrupt_get_status(dev);
	txgbe_dev_interrupt_action(dev, dev->intr_handle);
}

static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
	      uint32_t index, uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t enable_addr = 1;

	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	txgbe_remove_rar(dev, 0);
	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}

static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
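
/* Worked example for filter type 0 above (illustrative): for an address
 * ending in ...:CD:AB, addr_bytes[4] = 0xCD and addr_bytes[5] = 0xAB, so
 * vector = (0xCD >> 4) | (0xAB << 4) = 0x0C | 0xAB0 = 0xABC, i.e. the
 * top 12 bits of the address select the hash bucket.
 */
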
static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	vector = txgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);
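
	/* The 4096-bit UTA is spread over 128 32-bit registers: bits 11:5
	 * of the hash pick the register (uta_idx) and bits 4:0 pick the bit
	 * within it (uta_mask), as the shifts above encode.
	 */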
	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}

static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, TXGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (on)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}

static int
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= (1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= (1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}
	rte_intr_enable(intr_handle);

	return 0;
}

static int
txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= ~(1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= ~(1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}

	return 0;
}

/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
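		/* Each 32-bit IVAR register appears to hold four 8-bit
		 * entries: two queues (even/odd, selected by queue >> 1)
		 * times the Rx/Tx cause, which is what the idx arithmetic
		 * below selects (an inference from the code, not from a
		 * datasheet).
		 */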
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}

/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* Won't configure the msix register if no mapping is done
	 * between intr vector and event fd,
	 * but if msix has been enabled already, we need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-x mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
		     queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
	     TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT));
}

static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct txgbe_hw *hw;
	u8 *mc_addr_list;

	hw = TXGBE_DEV_HW(dev);
	mc_addr_list = (u8 *)mc_addr_set;
	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 txgbe_dev_addr_list_itr, TRUE);
}

static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure              = txgbe_dev_configure,
	.dev_infos_get              = txgbe_dev_info_get,
	.dev_start                  = txgbe_dev_start,
	.dev_stop                   = txgbe_dev_stop,
	.dev_set_link_up            = txgbe_dev_set_link_up,
	.dev_set_link_down          = txgbe_dev_set_link_down,
	.dev_close                  = txgbe_dev_close,
	.dev_reset                  = txgbe_dev_reset,
	.link_update                = txgbe_dev_link_update,
	.stats_get                  = txgbe_dev_stats_get,
	.stats_reset                = txgbe_dev_stats_reset,
	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
	.rx_queue_start             = txgbe_dev_rx_queue_start,
	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
	.tx_queue_start             = txgbe_dev_tx_queue_start,
	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release           = txgbe_dev_rx_queue_release,
	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
	.tx_queue_release           = txgbe_dev_tx_queue_release,
	.mac_addr_add               = txgbe_add_rar,
	.mac_addr_remove            = txgbe_remove_rar,
	.mac_addr_set               = txgbe_set_default_mac_addr,
	.uc_hash_table_set          = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
	.rxq_info_get               = txgbe_rxq_info_get,
	.txq_info_get               = txgbe_txq_info_get,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif