1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_interrupts.h>
14 #include <rte_debug.h>
16 #include <rte_memory.h>
18 #include <rte_alarm.h>
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
25 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
26 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
27 static int txgbe_dev_close(struct rte_eth_dev *dev);
28 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
29 int wait_to_complete);
30 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
32 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
33 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
34 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
35 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
36 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
37 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
38 struct rte_intr_handle *handle);
39 static void txgbe_dev_interrupt_handler(void *param);
40 static void txgbe_dev_interrupt_delayed_handler(void *param);
41 static void txgbe_configure_msix(struct rte_eth_dev *dev);
44 * The set of PCI devices this driver supports
46 static const struct rte_pci_id pci_id_txgbe_map[] = {
47 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
48 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
49 { .vendor_id = 0, /* sentinel */ },
52 static const struct rte_eth_desc_lim rx_desc_lim = {
53 .nb_max = TXGBE_RING_DESC_MAX,
54 .nb_min = TXGBE_RING_DESC_MIN,
55 .nb_align = TXGBE_RXD_ALIGN,
58 static const struct rte_eth_desc_lim tx_desc_lim = {
59 .nb_max = TXGBE_RING_DESC_MAX,
60 .nb_min = TXGBE_RING_DESC_MIN,
61 .nb_align = TXGBE_TXD_ALIGN,
62 .nb_seg_max = TXGBE_TX_MAX_SEG,
63 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
66 static const struct eth_dev_ops txgbe_eth_dev_ops;
68 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
69 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
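/* The name/offset tables below pair each human-readable xstat name with the
 * byte offset of its counter inside struct txgbe_hw_stats, so the xstats
 * callbacks can fetch any counter generically, roughly:
 *     value = *(uint64_t *)((char *)hw_stats + rte_txgbe_stats_strings[id].offset);
 * (illustrative sketch; the real lookup further down also walks the
 * per-priority and per-queue tables.)
 */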
70 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
72 HW_XSTAT(mng_bmc2host_packets),
73 HW_XSTAT(mng_host2bmc_packets),
79 HW_XSTAT(rx_total_bytes),
80 HW_XSTAT(rx_total_packets),
81 HW_XSTAT(tx_total_packets),
82 HW_XSTAT(rx_total_missed_packets),
83 HW_XSTAT(rx_broadcast_packets),
84 HW_XSTAT(rx_multicast_packets),
85 HW_XSTAT(rx_management_packets),
86 HW_XSTAT(tx_management_packets),
87 HW_XSTAT(rx_management_dropped),
90 HW_XSTAT(rx_crc_errors),
91 HW_XSTAT(rx_illegal_byte_errors),
92 HW_XSTAT(rx_error_bytes),
93 HW_XSTAT(rx_mac_short_packet_dropped),
94 HW_XSTAT(rx_length_errors),
95 HW_XSTAT(rx_undersize_errors),
96 HW_XSTAT(rx_fragment_errors),
97 HW_XSTAT(rx_oversize_errors),
98 HW_XSTAT(rx_jabber_errors),
99 HW_XSTAT(rx_l3_l4_xsum_error),
100 HW_XSTAT(mac_local_errors),
101 HW_XSTAT(mac_remote_errors),
104 HW_XSTAT(flow_director_added_filters),
105 HW_XSTAT(flow_director_removed_filters),
106 HW_XSTAT(flow_director_filter_add_errors),
107 HW_XSTAT(flow_director_filter_remove_errors),
108 HW_XSTAT(flow_director_matched_filters),
109 HW_XSTAT(flow_director_missed_filters),
112 HW_XSTAT(rx_fcoe_crc_errors),
113 HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
114 HW_XSTAT(rx_fcoe_dropped),
115 HW_XSTAT(rx_fcoe_packets),
116 HW_XSTAT(tx_fcoe_packets),
117 HW_XSTAT(rx_fcoe_bytes),
118 HW_XSTAT(tx_fcoe_bytes),
119 HW_XSTAT(rx_fcoe_no_ddp),
120 HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
123 HW_XSTAT(tx_macsec_pkts_untagged),
124 HW_XSTAT(tx_macsec_pkts_encrypted),
125 HW_XSTAT(tx_macsec_pkts_protected),
126 HW_XSTAT(tx_macsec_octets_encrypted),
127 HW_XSTAT(tx_macsec_octets_protected),
128 HW_XSTAT(rx_macsec_pkts_untagged),
129 HW_XSTAT(rx_macsec_pkts_badtag),
130 HW_XSTAT(rx_macsec_pkts_nosci),
131 HW_XSTAT(rx_macsec_pkts_unknownsci),
132 HW_XSTAT(rx_macsec_octets_decrypted),
133 HW_XSTAT(rx_macsec_octets_validated),
134 HW_XSTAT(rx_macsec_sc_pkts_unchecked),
135 HW_XSTAT(rx_macsec_sc_pkts_delayed),
136 HW_XSTAT(rx_macsec_sc_pkts_late),
137 HW_XSTAT(rx_macsec_sa_pkts_ok),
138 HW_XSTAT(rx_macsec_sa_pkts_invalid),
139 HW_XSTAT(rx_macsec_sa_pkts_notvalid),
140 HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
141 HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
144 HW_XSTAT(rx_size_64_packets),
145 HW_XSTAT(rx_size_65_to_127_packets),
146 HW_XSTAT(rx_size_128_to_255_packets),
147 HW_XSTAT(rx_size_256_to_511_packets),
148 HW_XSTAT(rx_size_512_to_1023_packets),
149 HW_XSTAT(rx_size_1024_to_max_packets),
150 HW_XSTAT(tx_size_64_packets),
151 HW_XSTAT(tx_size_65_to_127_packets),
152 HW_XSTAT(tx_size_128_to_255_packets),
153 HW_XSTAT(tx_size_256_to_511_packets),
154 HW_XSTAT(tx_size_512_to_1023_packets),
155 HW_XSTAT(tx_size_1024_to_max_packets),
158 HW_XSTAT(tx_xon_packets),
159 HW_XSTAT(rx_xon_packets),
160 HW_XSTAT(tx_xoff_packets),
161 HW_XSTAT(rx_xoff_packets),
163 HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
164 HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
165 HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
166 HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
169 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
170 sizeof(rte_txgbe_stats_strings[0]))
172 /* Per-priority statistics */
173 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
174 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
175 UP_XSTAT(rx_up_packets),
176 UP_XSTAT(tx_up_packets),
177 UP_XSTAT(rx_up_bytes),
178 UP_XSTAT(tx_up_bytes),
179 UP_XSTAT(rx_up_drop_packets),
181 UP_XSTAT(tx_up_xon_packets),
182 UP_XSTAT(rx_up_xon_packets),
183 UP_XSTAT(tx_up_xoff_packets),
184 UP_XSTAT(rx_up_xoff_packets),
185 UP_XSTAT(rx_up_dropped),
186 UP_XSTAT(rx_up_mbuf_alloc_errors),
187 UP_XSTAT(tx_up_xon2off_packets),
190 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
191 sizeof(rte_txgbe_up_strings[0]))
193 /* Per-queue statistics */
194 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
195 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
196 QP_XSTAT(rx_qp_packets),
197 QP_XSTAT(tx_qp_packets),
198 QP_XSTAT(rx_qp_bytes),
199 QP_XSTAT(tx_qp_bytes),
200 QP_XSTAT(rx_qp_mc_packets),
203 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
204 sizeof(rte_txgbe_qp_strings[0]))
207 txgbe_is_sfp(struct txgbe_hw *hw)
209 switch (hw->phy.type) {
210 case txgbe_phy_sfp_avago:
211 case txgbe_phy_sfp_ftl:
212 case txgbe_phy_sfp_intel:
213 case txgbe_phy_sfp_unknown:
214 case txgbe_phy_sfp_tyco_passive:
215 case txgbe_phy_sfp_unknown_passive:
222 static inline int32_t
223 txgbe_pf_reset_hw(struct txgbe_hw *hw)
228 status = hw->mac.reset_hw(hw);
230 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
231 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
232 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
233 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
236 if (status == TXGBE_ERR_SFP_NOT_PRESENT)
242 txgbe_enable_intr(struct rte_eth_dev *dev)
244 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
245 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
247 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
248 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
249 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
254 txgbe_disable_intr(struct txgbe_hw *hw)
256 PMD_INIT_FUNC_TRACE();
258 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
259 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
260 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
265 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
270 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
271 struct txgbe_stat_mappings *stat_mappings =
272 TXGBE_DEV_STAT_MAPPINGS(eth_dev);
273 uint32_t qsmr_mask = 0;
274 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
278 if (hw->mac.type != txgbe_mac_raptor)
281 if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
284 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
285 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
288 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
289 if (n >= TXGBE_NB_STAT_MAPPING) {
290 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
293 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
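/* Each 32-bit [RT]QSM register packs NB_QMAP_FIELDS_PER_QSM_REG stat-index
 * fields of QSM_REG_NB_BITS_PER_QMAP_FIELD bits each, so 'n' selects the
 * register and 'offset' the field inside it. Illustrative sketch, assuming
 * 4 fields of 8 bits per register: queue_id 5 -> n = 1, offset = 1, i.e.
 * bits [15:8] of the second register.
 */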
295 /* Now clear any previous stat_idx set */
296 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
298 stat_mappings->tqsm[n] &= ~clearing_mask;
300 stat_mappings->rqsm[n] &= ~clearing_mask;
302 q_map = (uint32_t)stat_idx;
303 q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
304 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
306 stat_mappings->tqsm[n] |= qsmr_mask;
308 stat_mappings->rqsm[n] |= qsmr_mask;
310 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
311 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
313 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
314 is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
319 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
321 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
322 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
323 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
324 const struct rte_memzone *mz;
328 PMD_INIT_FUNC_TRACE();
330 eth_dev->dev_ops = &txgbe_eth_dev_ops;
331 eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
332 eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
333 eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
336 * For secondary processes, we don't initialise any further as primary
337 * has already done this work. Only check we don't need a different
338 * RX and TX function.
340 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
341 struct txgbe_tx_queue *txq;
342 /* TX queue function in primary, set by last queue initialized;
343 * Tx queue may not have been initialized by the primary process
345 if (eth_dev->data->tx_queues) {
346 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
347 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
348 txgbe_set_tx_function(eth_dev, txq);
350 /* Use default TX function if we get here */
351 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
352 "Using default TX function.");
355 txgbe_set_rx_function(eth_dev);
360 rte_eth_copy_pci_info(eth_dev, pci_dev);
362 /* Vendor and Device ID need to be set before init of shared code */
363 hw->device_id = pci_dev->id.device_id;
364 hw->vendor_id = pci_dev->id.vendor_id;
365 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
366 hw->allow_unsupported_sfp = 1;
368 /* Reserve memory for interrupt status block */
369 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
370 16, TXGBE_ALIGN, SOCKET_ID_ANY);
374 hw->isb_dma = TMZ_PADDR(mz);
375 hw->isb_mem = TMZ_VADDR(mz);
377 /* Initialize the shared code (base driver) */
378 err = txgbe_init_shared_code(hw);
380 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
384 err = hw->rom.init_params(hw);
386 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
390 /* Make sure we have a good EEPROM before we read from it */
391 err = hw->rom.validate_checksum(hw, &csum);
393 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
397 err = hw->mac.init_hw(hw);
400 * Devices with copper phys will fail to initialise if txgbe_init_hw()
401 * is called too soon after the kernel driver unbinding/binding occurs.
402 * The failure occurs in txgbe_identify_phy() for all devices,
403 * but for non-copper devices, txgbe_identify_sfp_module() is
404 * also called. See txgbe_identify_phy(). The reason for the
405 * failure is not known, and only occurs when virtualisation features
406 * are disabled in the bios. A delay of 200ms was found to be enough by
407 * trial-and-error, and is doubled to be safe.
409 if (err && hw->phy.media_type == txgbe_media_type_copper) {
411 err = hw->mac.init_hw(hw);
414 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
417 if (err == TXGBE_ERR_EEPROM_VERSION) {
418 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
419 "LOM. Please be aware there may be issues associated "
420 "with your hardware.");
421 PMD_INIT_LOG(ERR, "If you are experiencing problems "
422 "please contact your hardware representative "
423 "who provided you with this hardware.");
424 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
425 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
428 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
432 /* Reset the hw statistics */
433 txgbe_dev_stats_reset(eth_dev);
435 /* disable interrupt */
436 txgbe_disable_intr(hw);
438 /* Allocate memory for storing MAC addresses */
439 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
440 hw->mac.num_rar_entries, 0);
441 if (eth_dev->data->mac_addrs == NULL) {
443 "Failed to allocate %u bytes needed to store "
445 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
449 /* Copy the permanent MAC address */
450 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
451 ð_dev->data->mac_addrs[0]);
453 /* Allocate memory for storing hash filter MAC addresses */
454 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
455 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
456 if (eth_dev->data->hash_mac_addrs == NULL) {
458 "Failed to allocate %d bytes needed to store MAC addresses",
459 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
463 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
464 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
465 (int)hw->mac.type, (int)hw->phy.type,
466 (int)hw->phy.sfp_type);
468 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
469 (int)hw->mac.type, (int)hw->phy.type);
471 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
472 eth_dev->data->port_id, pci_dev->id.vendor_id,
473 pci_dev->id.device_id);
475 rte_intr_callback_register(intr_handle,
476 txgbe_dev_interrupt_handler, eth_dev);
478 /* enable uio/vfio intr/eventfd mapping */
479 rte_intr_enable(intr_handle);
481 /* enable supported interrupts */
482 txgbe_enable_intr(eth_dev);
488 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
490 PMD_INIT_FUNC_TRACE();
492 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
495 txgbe_dev_close(eth_dev);
501 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
502 struct rte_pci_device *pci_dev)
504 struct rte_eth_dev *pf_ethdev;
505 struct rte_eth_devargs eth_da;
508 if (pci_dev->device.devargs) {
509 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
514 memset(ð_da, 0, sizeof(eth_da));
517 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
518 sizeof(struct txgbe_adapter),
519 eth_dev_pci_specific_init, pci_dev,
520 eth_txgbe_dev_init, NULL);
522 if (retval || eth_da.nb_representor_ports < 1)
525 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
526 if (pf_ethdev == NULL)
532 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
534 struct rte_eth_dev *ethdev;
536 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
540 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
543 static struct rte_pci_driver rte_txgbe_pmd = {
544 .id_table = pci_id_txgbe_map,
545 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
546 RTE_PCI_DRV_INTR_LSC,
547 .probe = eth_txgbe_pci_probe,
548 .remove = eth_txgbe_pci_remove,
552 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
554 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
559 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
562 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
568 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
569 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
570 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
571 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
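/* Illustrative sketch of the pool math above, assuming TXGBE_MAX_RX_QUEUE_NUM
 * is 128: with ETH_32_POOLS active each pool owns 128 / 32 = 4 queues, and the
 * PF's default pool queues start right after those owned by the VFs, e.g.
 * max_vfs = 31 -> def_pool_q_idx = 31 * 4 = 124.
 */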
576 txgbe_check_mq_mode(struct rte_eth_dev *dev)
578 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
579 uint16_t nb_rx_q = dev->data->nb_rx_queues;
580 uint16_t nb_tx_q = dev->data->nb_tx_queues;
582 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
583 /* check multi-queue mode */
584 switch (dev_conf->rxmode.mq_mode) {
585 case ETH_MQ_RX_VMDQ_DCB:
586 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
588 case ETH_MQ_RX_VMDQ_DCB_RSS:
589 /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
590 PMD_INIT_LOG(ERR, "SRIOV active,"
591 " unsupported mq_mode rx %d.",
592 dev_conf->rxmode.mq_mode);
595 case ETH_MQ_RX_VMDQ_RSS:
596 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
597 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
598 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
599 PMD_INIT_LOG(ERR, "SRIOV is active,"
600 " invalid queue number"
601 " for VMDQ RSS, allowed"
602 " value are 1, 2 or 4.");
606 case ETH_MQ_RX_VMDQ_ONLY:
608 /* if no mq mode is configured, use the default scheme */
609 dev->data->dev_conf.rxmode.mq_mode =
612 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
613 /* SRIOV only works in VMDq enable mode */
614 PMD_INIT_LOG(ERR, "SRIOV is active,"
615 " wrong mq_mode rx %d.",
616 dev_conf->rxmode.mq_mode);
620 switch (dev_conf->txmode.mq_mode) {
621 case ETH_MQ_TX_VMDQ_DCB:
622 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
623 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
625 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
626 dev->data->dev_conf.txmode.mq_mode =
631 /* check valid queue number */
632 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
633 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
634 PMD_INIT_LOG(ERR, "SRIOV is active,"
635 " nb_rx_q=%d nb_tx_q=%d queue number"
636 " must be less than or equal to %d.",
638 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
642 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
643 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
647 /* check configuration for vmdq+dcb mode */
648 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
649 const struct rte_eth_vmdq_dcb_conf *conf;
651 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
652 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
653 TXGBE_VMDQ_DCB_NB_QUEUES);
656 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
657 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
658 conf->nb_queue_pools == ETH_32_POOLS)) {
659 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
660 " nb_queue_pools must be %d or %d.",
661 ETH_16_POOLS, ETH_32_POOLS);
665 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
666 const struct rte_eth_vmdq_dcb_tx_conf *conf;
668 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
669 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
670 TXGBE_VMDQ_DCB_NB_QUEUES);
673 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
674 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
675 conf->nb_queue_pools == ETH_32_POOLS)) {
676 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
677 " nb_queue_pools != %d and"
678 " nb_queue_pools != %d.",
679 ETH_16_POOLS, ETH_32_POOLS);
684 /* For DCB mode check our configuration before we go further */
685 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
686 const struct rte_eth_dcb_rx_conf *conf;
688 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
689 if (!(conf->nb_tcs == ETH_4_TCS ||
690 conf->nb_tcs == ETH_8_TCS)) {
691 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
692 " and nb_tcs != %d.",
693 ETH_4_TCS, ETH_8_TCS);
698 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
699 const struct rte_eth_dcb_tx_conf *conf;
701 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
702 if (!(conf->nb_tcs == ETH_4_TCS ||
703 conf->nb_tcs == ETH_8_TCS)) {
704 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
705 " and nb_tcs != %d.",
706 ETH_4_TCS, ETH_8_TCS);
715 txgbe_dev_configure(struct rte_eth_dev *dev)
717 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
718 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
721 PMD_INIT_FUNC_TRACE();
723 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
724 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
726 /* multiple queue mode checking */
727 ret = txgbe_check_mq_mode(dev);
729 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
734 /* set flag to update link status after init */
735 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
738 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
739 * allocation preconditions, we will reset it.
741 adapter->rx_bulk_alloc_allowed = true;
747 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
749 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
750 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
753 gpie = rd32(hw, TXGBE_GPIOINTEN);
754 gpie |= TXGBE_GPIOBIT_6;
755 wr32(hw, TXGBE_GPIOINTEN, gpie);
756 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
760 * Configure device link speed and setup link.
761 * It returns 0 on success.
764 txgbe_dev_start(struct rte_eth_dev *dev)
766 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
767 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
768 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
769 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
770 uint32_t intr_vector = 0;
772 bool link_up = false, negotiate = 0;
774 uint32_t allowed_speeds = 0;
776 uint32_t *link_speeds;
778 PMD_INIT_FUNC_TRACE();
780 /* TXGBE devices don't support:
781 * - half duplex (checked afterwards for valid speeds)
782 * - fixed speed: TODO implement
784 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
786 "Invalid link_speeds for port %u, fix speed not supported",
791 /* Stop the link setup handler before resetting the HW. */
792 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
794 /* disable uio/vfio intr/eventfd mapping */
795 rte_intr_disable(intr_handle);
798 hw->adapter_stopped = 0;
801 /* reinitialize adapter
802 * this calls reset and start
804 hw->nb_rx_queues = dev->data->nb_rx_queues;
805 hw->nb_tx_queues = dev->data->nb_tx_queues;
806 status = txgbe_pf_reset_hw(hw);
809 hw->mac.start_hw(hw);
810 hw->mac.get_link_status = true;
812 txgbe_dev_phy_intr_setup(dev);
814 /* check and configure queue intr-vector mapping */
815 if ((rte_intr_cap_multiple(intr_handle) ||
816 !RTE_ETH_DEV_SRIOV(dev).active) &&
817 dev->data->dev_conf.intr_conf.rxq != 0) {
818 intr_vector = dev->data->nb_rx_queues;
819 if (rte_intr_efd_enable(intr_handle, intr_vector))
823 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
824 intr_handle->intr_vec =
825 rte_zmalloc("intr_vec",
826 dev->data->nb_rx_queues * sizeof(int), 0);
827 if (intr_handle->intr_vec == NULL) {
828 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
829 " intr_vec", dev->data->nb_rx_queues);
834 /* configure msix for sleep until rx interrupt */
835 txgbe_configure_msix(dev);
837 /* initialize transmission unit */
838 txgbe_dev_tx_init(dev);
840 /* This can fail when allocating mbufs for descriptor rings */
841 err = txgbe_dev_rx_init(dev);
843 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
847 err = txgbe_dev_rxtx_start(dev);
849 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
853 /* Skip link setup if loopback mode is enabled. */
854 if (hw->mac.type == txgbe_mac_raptor &&
855 dev->data->dev_conf.lpbk_mode)
856 goto skip_link_setup;
858 if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
859 err = hw->mac.setup_sfp(hw);
864 if (hw->phy.media_type == txgbe_media_type_copper) {
865 /* Turn on the copper */
866 hw->phy.set_phy_power(hw, true);
868 /* Turn on the laser */
869 hw->mac.enable_tx_laser(hw);
872 err = hw->mac.check_link(hw, &speed, &link_up, 0);
875 dev->data->dev_link.link_status = link_up;
877 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
881 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
884 link_speeds = &dev->data->dev_conf.link_speeds;
885 if (*link_speeds & ~allowed_speeds) {
886 PMD_INIT_LOG(ERR, "Invalid link setting");
891 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
892 speed = (TXGBE_LINK_SPEED_100M_FULL |
893 TXGBE_LINK_SPEED_1GB_FULL |
894 TXGBE_LINK_SPEED_10GB_FULL);
896 if (*link_speeds & ETH_LINK_SPEED_10G)
897 speed |= TXGBE_LINK_SPEED_10GB_FULL;
898 if (*link_speeds & ETH_LINK_SPEED_5G)
899 speed |= TXGBE_LINK_SPEED_5GB_FULL;
900 if (*link_speeds & ETH_LINK_SPEED_2_5G)
901 speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
902 if (*link_speeds & ETH_LINK_SPEED_1G)
903 speed |= TXGBE_LINK_SPEED_1GB_FULL;
904 if (*link_speeds & ETH_LINK_SPEED_100M)
905 speed |= TXGBE_LINK_SPEED_100M_FULL;
908 err = hw->mac.setup_link(hw, speed, link_up);
914 if (rte_intr_allow_others(intr_handle)) {
915 /* check if lsc interrupt is enabled */
916 if (dev->data->dev_conf.intr_conf.lsc != 0)
917 txgbe_dev_lsc_interrupt_setup(dev, TRUE);
919 txgbe_dev_lsc_interrupt_setup(dev, FALSE);
920 txgbe_dev_macsec_interrupt_setup(dev);
921 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
923 rte_intr_callback_unregister(intr_handle,
924 txgbe_dev_interrupt_handler, dev);
925 if (dev->data->dev_conf.intr_conf.lsc != 0)
926 PMD_INIT_LOG(INFO, "lsc won't be enabled because of"
927 " no intr multiplex");
930 /* check if rxq interrupt is enabled */
931 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
932 rte_intr_dp_is_en(intr_handle))
933 txgbe_dev_rxq_interrupt_setup(dev);
935 /* enable uio/vfio intr/eventfd mapping */
936 rte_intr_enable(intr_handle);
938 /* resume the enabled interrupts, since they were cleared by the hw reset */
939 txgbe_enable_intr(dev);
942 * Update link status right before return, because it may
943 * start link configuration process in a separate thread.
945 txgbe_dev_link_update(dev, 0);
947 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
949 txgbe_read_stats_registers(hw, hw_stats);
950 hw->offset_loaded = 1;
955 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
956 txgbe_dev_clear_queues(dev);
961 * Stop device: disable rx and tx functions to allow for reconfiguring.
964 txgbe_dev_stop(struct rte_eth_dev *dev)
966 struct rte_eth_link link;
967 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
968 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
969 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
971 if (hw->adapter_stopped)
974 PMD_INIT_FUNC_TRACE();
976 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
978 /* disable interrupts */
979 txgbe_disable_intr(hw);
982 txgbe_pf_reset_hw(hw);
983 hw->adapter_stopped = 0;
988 if (hw->phy.media_type == txgbe_media_type_copper) {
989 /* Turn off the copper */
990 hw->phy.set_phy_power(hw, false);
992 /* Turn off the laser */
993 hw->mac.disable_tx_laser(hw);
996 txgbe_dev_clear_queues(dev);
998 /* Clear stored conf */
999 dev->data->scattered_rx = 0;
1002 /* Clear recorded link status */
1003 memset(&link, 0, sizeof(link));
1004 rte_eth_linkstatus_set(dev, &link);
1006 if (!rte_intr_allow_others(intr_handle))
1007 /* resume to the default handler */
1008 rte_intr_callback_register(intr_handle,
1009 txgbe_dev_interrupt_handler,
1012 /* Clean datapath event and queue/vec mapping */
1013 rte_intr_efd_disable(intr_handle);
1014 if (intr_handle->intr_vec != NULL) {
1015 rte_free(intr_handle->intr_vec);
1016 intr_handle->intr_vec = NULL;
1019 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1021 hw->adapter_stopped = true;
1022 dev->data->dev_started = 0;
1028 * Set device link up: enable tx.
1031 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1033 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1035 if (hw->phy.media_type == txgbe_media_type_copper) {
1036 /* Turn on the copper */
1037 hw->phy.set_phy_power(hw, true);
1039 /* Turn on the laser */
1040 hw->mac.enable_tx_laser(hw);
1041 txgbe_dev_link_update(dev, 0);
1048 * Set device link down: disable tx.
1051 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1053 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1055 if (hw->phy.media_type == txgbe_media_type_copper) {
1056 /* Turn off the copper */
1057 hw->phy.set_phy_power(hw, false);
1059 /* Turn off the laser */
1060 hw->mac.disable_tx_laser(hw);
1061 txgbe_dev_link_update(dev, 0);
1068 * Reset and stop device.
1071 txgbe_dev_close(struct rte_eth_dev *dev)
1073 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1074 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1075 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1079 PMD_INIT_FUNC_TRACE();
1081 txgbe_pf_reset_hw(hw);
1083 ret = txgbe_dev_stop(dev);
1085 txgbe_dev_free_queues(dev);
1087 /* reprogram the RAR[0] in case user changed it. */
1088 txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1090 /* disable uio intr before callback unregister */
1091 rte_intr_disable(intr_handle);
1094 ret = rte_intr_callback_unregister(intr_handle,
1095 txgbe_dev_interrupt_handler, dev);
1096 if (ret >= 0 || ret == -ENOENT) {
1098 } else if (ret != -EAGAIN) {
1100 "intr callback unregister failed: %d",
1104 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1106 /* cancel the delay handler before remove dev */
1107 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1109 rte_free(dev->data->mac_addrs);
1110 dev->data->mac_addrs = NULL;
1112 rte_free(dev->data->hash_mac_addrs);
1113 dev->data->hash_mac_addrs = NULL;
1122 txgbe_dev_reset(struct rte_eth_dev *dev)
1126 /* When a DPDK PMD PF begins to reset the PF port, it should notify all
1127 * its VFs to make them align with it. The detailed notification
1128 * mechanism is PMD specific. As to txgbe PF, it is rather complex.
1129 * To avoid unexpected behavior in VF, currently reset of PF with
1130 * SR-IOV activation is not supported. It might be supported later.
1132 if (dev->data->sriov.active)
1135 ret = eth_txgbe_dev_uninit(dev);
1139 ret = eth_txgbe_dev_init(dev, NULL);
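/* The helper macros below turn the 32-bit (and split 36-bit LSB/MSB) hardware
 * queue counters into values relative to a software baseline: the baseline is
 * captured while hw->offset_loaded is 0 (e.g. across a stats reset), and each
 * later read reports current - baseline, adding 2^32 (resp. 2^36) when the
 * hardware counter has wrapped below the baseline.
 */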
1144 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
1146 uint32_t current_counter = rd32(hw, reg); \
1147 if (current_counter < last_counter) \
1148 current_counter += 0x100000000LL; \
1149 if (!hw->offset_loaded) \
1150 last_counter = current_counter; \
1151 counter = current_counter - last_counter; \
1152 counter &= 0xFFFFFFFFLL; \
1155 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1157 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
1158 uint64_t current_counter_msb = rd32(hw, reg_msb); \
1159 uint64_t current_counter = (current_counter_msb << 32) | \
1160 current_counter_lsb; \
1161 if (current_counter < last_counter) \
1162 current_counter += 0x1000000000LL; \
1163 if (!hw->offset_loaded) \
1164 last_counter = current_counter; \
1165 counter = current_counter - last_counter; \
1166 counter &= 0xFFFFFFFFFLL; \
1170 txgbe_read_stats_registers(struct txgbe_hw *hw,
1171 struct txgbe_hw_stats *hw_stats)
1176 for (i = 0; i < hw->nb_rx_queues; i++) {
1177 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
1178 hw->qp_last[i].rx_qp_packets,
1179 hw_stats->qp[i].rx_qp_packets);
1180 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
1181 hw->qp_last[i].rx_qp_bytes,
1182 hw_stats->qp[i].rx_qp_bytes);
1183 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
1184 hw->qp_last[i].rx_qp_mc_packets,
1185 hw_stats->qp[i].rx_qp_mc_packets);
1188 for (i = 0; i < hw->nb_tx_queues; i++) {
1189 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
1190 hw->qp_last[i].tx_qp_packets,
1191 hw_stats->qp[i].tx_qp_packets);
1192 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
1193 hw->qp_last[i].tx_qp_bytes,
1194 hw_stats->qp[i].tx_qp_bytes);
1197 for (i = 0; i < TXGBE_MAX_UP; i++) {
1198 hw_stats->up[i].rx_up_xon_packets +=
1199 rd32(hw, TXGBE_PBRXUPXON(i));
1200 hw_stats->up[i].rx_up_xoff_packets +=
1201 rd32(hw, TXGBE_PBRXUPXOFF(i));
1202 hw_stats->up[i].tx_up_xon_packets +=
1203 rd32(hw, TXGBE_PBTXUPXON(i));
1204 hw_stats->up[i].tx_up_xoff_packets +=
1205 rd32(hw, TXGBE_PBTXUPXOFF(i));
1206 hw_stats->up[i].tx_up_xon2off_packets +=
1207 rd32(hw, TXGBE_PBTXUPOFF(i));
1208 hw_stats->up[i].rx_up_dropped +=
1209 rd32(hw, TXGBE_PBRXMISS(i));
1211 hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
1212 hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
1213 hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
1214 hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
1217 hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
1218 hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
1220 hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
1221 hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
1222 hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
1225 hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
1226 hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
1227 hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
1229 hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
1230 hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
1231 hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
1233 hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
1234 hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
1236 hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
1237 hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
1238 hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
1239 hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
1240 hw_stats->rx_size_512_to_1023_packets +=
1241 rd64(hw, TXGBE_MACRX512TO1023L);
1242 hw_stats->rx_size_1024_to_max_packets +=
1243 rd64(hw, TXGBE_MACRX1024TOMAXL);
1244 hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
1245 hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
1246 hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
1247 hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
1248 hw_stats->tx_size_512_to_1023_packets +=
1249 rd64(hw, TXGBE_MACTX512TO1023L);
1250 hw_stats->tx_size_1024_to_max_packets +=
1251 rd64(hw, TXGBE_MACTX1024TOMAXL);
1253 hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
1254 hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
1255 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
1258 hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
1259 hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
1260 hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
1261 hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
1264 hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
1265 hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
1266 hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
1267 hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
1268 hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
1269 hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
1270 hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
1272 /* Flow Director Stats */
1273 hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
1274 hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
1275 hw_stats->flow_director_added_filters +=
1276 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
1277 hw_stats->flow_director_removed_filters +=
1278 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
1279 hw_stats->flow_director_filter_add_errors +=
1280 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
1281 hw_stats->flow_director_filter_remove_errors +=
1282 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
1285 hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
1286 hw_stats->tx_macsec_pkts_encrypted +=
1287 rd32(hw, TXGBE_LSECTX_ENCPKT);
1288 hw_stats->tx_macsec_pkts_protected +=
1289 rd32(hw, TXGBE_LSECTX_PROTPKT);
1290 hw_stats->tx_macsec_octets_encrypted +=
1291 rd32(hw, TXGBE_LSECTX_ENCOCT);
1292 hw_stats->tx_macsec_octets_protected +=
1293 rd32(hw, TXGBE_LSECTX_PROTOCT);
1294 hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
1295 hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
1296 hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
1297 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
1298 hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
1299 hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
1300 hw_stats->rx_macsec_sc_pkts_unchecked +=
1301 rd32(hw, TXGBE_LSECRX_UNCHKPKT);
1302 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
1303 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
1304 for (i = 0; i < 2; i++) {
1305 hw_stats->rx_macsec_sa_pkts_ok +=
1306 rd32(hw, TXGBE_LSECRX_OKPKT(i));
1307 hw_stats->rx_macsec_sa_pkts_invalid +=
1308 rd32(hw, TXGBE_LSECRX_INVPKT(i));
1309 hw_stats->rx_macsec_sa_pkts_notvalid +=
1310 rd32(hw, TXGBE_LSECRX_BADPKT(i));
1312 hw_stats->rx_macsec_sa_pkts_unusedsa +=
1313 rd32(hw, TXGBE_LSECRX_INVSAPKT);
1314 hw_stats->rx_macsec_sa_pkts_notusingsa +=
1315 rd32(hw, TXGBE_LSECRX_BADSAPKT);
1317 hw_stats->rx_total_missed_packets = 0;
1318 for (i = 0; i < TXGBE_MAX_UP; i++) {
1319 hw_stats->rx_total_missed_packets +=
1320 hw_stats->up[i].rx_up_dropped;
1325 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1327 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1328 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1329 struct txgbe_stat_mappings *stat_mappings =
1330 TXGBE_DEV_STAT_MAPPINGS(dev);
1333 txgbe_read_stats_registers(hw, hw_stats);
1338 /* Fill out the rte_eth_stats statistics structure */
1339 stats->ipackets = hw_stats->rx_packets;
1340 stats->ibytes = hw_stats->rx_bytes;
1341 stats->opackets = hw_stats->tx_packets;
1342 stats->obytes = hw_stats->tx_bytes;
1344 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1345 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1346 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1347 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1348 memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1349 for (i = 0; i < TXGBE_MAX_QP; i++) {
1350 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1351 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1354 q_map = (stat_mappings->rqsm[n] >> offset)
1355 & QMAP_FIELD_RESERVED_BITS_MASK;
1356 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1357 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1358 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1359 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1361 q_map = (stat_mappings->tqsm[n] >> offset)
1362 & QMAP_FIELD_RESERVED_BITS_MASK;
1363 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1364 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1365 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1366 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1370 stats->imissed = hw_stats->rx_total_missed_packets;
1371 stats->ierrors = hw_stats->rx_crc_errors +
1372 hw_stats->rx_mac_short_packet_dropped +
1373 hw_stats->rx_length_errors +
1374 hw_stats->rx_undersize_errors +
1375 hw_stats->rx_oversize_errors +
1376 hw_stats->rx_drop_packets +
1377 hw_stats->rx_illegal_byte_errors +
1378 hw_stats->rx_error_bytes +
1379 hw_stats->rx_fragment_errors +
1380 hw_stats->rx_fcoe_crc_errors +
1381 hw_stats->rx_fcoe_mbuf_allocation_errors;
1389 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
1391 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1392 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1394 /* HW registers are cleared on read */
1395 hw->offset_loaded = 0;
1396 txgbe_dev_stats_get(dev, NULL);
1397 hw->offset_loaded = 1;
1399 /* Reset software totals */
1400 memset(hw_stats, 0, sizeof(*hw_stats));
1405 /* This function calculates the number of xstats based on the current config */
1407 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
1409 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1410 return TXGBE_NB_HW_STATS +
1411 TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
1412 TXGBE_NB_QP_STATS * nb_queues;
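/* The flat xstats id space is laid out as: [0, TXGBE_NB_HW_STATS) device-wide
 * counters, then TXGBE_NB_UP_STATS entries per user priority (TXGBE_MAX_UP
 * groups), then TXGBE_NB_QP_STATS entries per queue (TXGBE_MAX_QP groups).
 * txgbe_get_name_by_id() and txgbe_get_offset_by_id() below peel those ranges
 * off in that order.
 */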
1416 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1420 /* Extended stats from txgbe_hw_stats */
1421 if (id < TXGBE_NB_HW_STATS) {
1422 snprintf(name, size, "[hw]%s",
1423 rte_txgbe_stats_strings[id].name);
1426 id -= TXGBE_NB_HW_STATS;
1428 /* Priority Stats */
1429 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1430 nb = id / TXGBE_NB_UP_STATS;
1431 st = id % TXGBE_NB_UP_STATS;
1432 snprintf(name, size, "[p%u]%s", nb,
1433 rte_txgbe_up_strings[st].name);
1436 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1439 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1440 nb = id / TXGBE_NB_QP_STATS;
1441 st = id % TXGBE_NB_QP_STATS;
1442 snprintf(name, size, "[q%u]%s", nb,
1443 rte_txgbe_qp_strings[st].name);
1446 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1448 return -(int)(id + 1);
1452 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1456 /* Extended stats from txgbe_hw_stats */
1457 if (id < TXGBE_NB_HW_STATS) {
1458 *offset = rte_txgbe_stats_strings[id].offset;
1461 id -= TXGBE_NB_HW_STATS;
1463 /* Priority Stats */
1464 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1465 nb = id / TXGBE_NB_UP_STATS;
1466 st = id % TXGBE_NB_UP_STATS;
1467 *offset = rte_txgbe_up_strings[st].offset +
1468 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
1471 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1474 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1475 nb = id / TXGBE_NB_QP_STATS;
1476 st = id % TXGBE_NB_QP_STATS;
1477 *offset = rte_txgbe_qp_strings[st].offset +
1478 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
1481 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1483 return -(int)(id + 1);
1486 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1487 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1489 unsigned int i, count;
1491 count = txgbe_xstats_calc_num(dev);
1492 if (xstats_names == NULL)
1495 /* Note: limit >= cnt_stats checked upstream
1496 * in rte_eth_xstats_names()
1498 limit = min(limit, count);
1500 /* Extended stats from txgbe_hw_stats */
1501 for (i = 0; i < limit; i++) {
1502 if (txgbe_get_name_by_id(i, xstats_names[i].name,
1503 sizeof(xstats_names[i].name))) {
1504 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1512 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1513 struct rte_eth_xstat_name *xstats_names,
1514 const uint64_t *ids,
1520 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
1522 for (i = 0; i < limit; i++) {
1523 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
1524 sizeof(xstats_names[i].name))) {
1525 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1534 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1537 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1538 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1539 unsigned int i, count;
1541 txgbe_read_stats_registers(hw, hw_stats);
1543 /* If this is a reset, xstats is NULL and we have cleared the
1544 * registers by reading them.
1546 count = txgbe_xstats_calc_num(dev);
1550 limit = min(limit, txgbe_xstats_calc_num(dev));
1552 /* Extended stats from txgbe_hw_stats */
1553 for (i = 0; i < limit; i++) {
1554 uint32_t offset = 0;
1556 if (txgbe_get_offset_by_id(i, &offset)) {
1557 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1560 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1568 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1571 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1572 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1573 unsigned int i, count;
1575 txgbe_read_stats_registers(hw, hw_stats);
1577 /* If this is a reset, xstats is NULL and we have cleared the
1578 * registers by reading them.
1580 count = txgbe_xstats_calc_num(dev);
1584 limit = min(limit, txgbe_xstats_calc_num(dev));
1586 /* Extended stats from txgbe_hw_stats */
1587 for (i = 0; i < limit; i++) {
1590 if (txgbe_get_offset_by_id(i, &offset)) {
1591 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1594 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1601 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1602 uint64_t *values, unsigned int limit)
1604 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1608 return txgbe_dev_xstats_get_(dev, values, limit);
1610 for (i = 0; i < limit; i++) {
1613 if (txgbe_get_offset_by_id(ids[i], &offset)) {
1614 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1617 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1624 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
1626 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1627 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1629 /* HW registers are cleared on read */
1630 hw->offset_loaded = 0;
1631 txgbe_read_stats_registers(hw, hw_stats);
1632 hw->offset_loaded = 1;
1634 /* Reset software totals */
1635 memset(hw_stats, 0, sizeof(*hw_stats));
1641 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1643 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1644 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1646 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1647 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1648 dev_info->min_rx_bufsize = 1024;
1649 dev_info->max_rx_pktlen = 15872;
1650 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1651 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
1652 dev_info->max_vfs = pci_dev->max_vfs;
1653 dev_info->max_vmdq_pools = ETH_64_POOLS;
1654 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
1655 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
1656 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
1657 dev_info->rx_queue_offload_capa);
1658 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
1659 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
1661 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1663 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
1664 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
1665 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
1667 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
1672 dev_info->default_txconf = (struct rte_eth_txconf) {
1674 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
1675 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
1676 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
1678 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
1682 dev_info->rx_desc_lim = rx_desc_lim;
1683 dev_info->tx_desc_lim = tx_desc_lim;
1685 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1686 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1687 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
1689 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1690 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1692 /* Driver-preferred Rx/Tx parameters */
1693 dev_info->default_rxportconf.burst_size = 32;
1694 dev_info->default_txportconf.burst_size = 32;
1695 dev_info->default_rxportconf.nb_queues = 1;
1696 dev_info->default_txportconf.nb_queues = 1;
1697 dev_info->default_rxportconf.ring_size = 256;
1698 dev_info->default_txportconf.ring_size = 256;
1704 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1706 if (dev->rx_pkt_burst == txgbe_recv_pkts ||
1707 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
1708 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
1709 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
1710 return txgbe_get_supported_ptypes();
1716 txgbe_dev_setup_link_alarm_handler(void *param)
1718 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1719 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1720 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1722 bool autoneg = false;
1724 speed = hw->phy.autoneg_advertised;
1726 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
1728 hw->mac.setup_link(hw, speed, true);
1730 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
1733 /* return 0 if link status changed, -1 if not changed */
1735 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
1736 int wait_to_complete)
1738 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1739 struct rte_eth_link link;
1740 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
1741 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1746 memset(&link, 0, sizeof(link));
1747 link.link_status = ETH_LINK_DOWN;
1748 link.link_speed = ETH_SPEED_NUM_NONE;
1749 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1750 link.link_autoneg = ETH_LINK_AUTONEG;
1752 hw->mac.get_link_status = true;
1754 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
1755 return rte_eth_linkstatus_set(dev, &link);
1757 /* check if it needs to wait to complete, if lsc interrupt is enabled */
1758 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1761 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1764 link.link_speed = ETH_SPEED_NUM_100M;
1765 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1766 return rte_eth_linkstatus_set(dev, &link);
1770 if (hw->phy.media_type == txgbe_media_type_fiber) {
1771 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
1772 rte_eal_alarm_set(10,
1773 txgbe_dev_setup_link_alarm_handler, dev);
1775 return rte_eth_linkstatus_set(dev, &link);
1778 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
1779 link.link_status = ETH_LINK_UP;
1780 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1782 switch (link_speed) {
1784 case TXGBE_LINK_SPEED_UNKNOWN:
1785 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1786 link.link_speed = ETH_SPEED_NUM_100M;
1789 case TXGBE_LINK_SPEED_100M_FULL:
1790 link.link_speed = ETH_SPEED_NUM_100M;
1793 case TXGBE_LINK_SPEED_1GB_FULL:
1794 link.link_speed = ETH_SPEED_NUM_1G;
1797 case TXGBE_LINK_SPEED_2_5GB_FULL:
1798 link.link_speed = ETH_SPEED_NUM_2_5G;
1801 case TXGBE_LINK_SPEED_5GB_FULL:
1802 link.link_speed = ETH_SPEED_NUM_5G;
1805 case TXGBE_LINK_SPEED_10GB_FULL:
1806 link.link_speed = ETH_SPEED_NUM_10G;
1810 return rte_eth_linkstatus_set(dev, &link);
1814 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1816 return txgbe_dev_link_update_share(dev, wait_to_complete);
1820 * It clears the interrupt causes and enables the interrupt.
1821 * It will be called only once during NIC initialization.
1824 * Pointer to struct rte_eth_dev.
1826 * Enable or Disable.
1829 * - On success, zero.
1830 * - On failure, a negative value.
1833 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1835 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1837 txgbe_dev_link_status_print(dev);
1839 intr->mask_misc |= TXGBE_ICRMISC_LSC;
1841 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
1847 * It clears the interrupt causes and enables the interrupt.
1848 * It will be called only once during NIC initialization.
1851 * Pointer to struct rte_eth_dev.
1854 * - On success, zero.
1855 * - On failure, a negative value.
1858 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
1860 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1862 intr->mask[0] |= TXGBE_ICR_MASK;
1863 intr->mask[1] |= TXGBE_ICR_MASK;
1869 * It clears the interrupt causes and enables the interrupt.
1870 * It will be called only once during NIC initialization.
1873 * Pointer to struct rte_eth_dev.
1876 * - On success, zero.
1877 * - On failure, a negative value.
1880 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
1882 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1884 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
1890 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
1893 * Pointer to struct rte_eth_dev.
1896 * - On success, zero.
1897 * - On failure, a negative value.
1900 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1903 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1904 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1906 /* clear all cause mask */
1907 txgbe_disable_intr(hw);
1909 /* read-on-clear nic registers here */
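/* The misc interrupt cause is taken from the interrupt status block (ISB), a
 * small DMA area reserved at init time ("txgbe_driver" memzone) that the
 * hardware is expected to fill with the latest interrupt causes; hw->isb_mem
 * is its virtual address.
 */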
1910 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
1911 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
1915 /* set flag for async link update */
1916 if (eicr & TXGBE_ICRMISC_LSC)
1917 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1919 if (eicr & TXGBE_ICRMISC_VFMBX)
1920 intr->flags |= TXGBE_FLAG_MAILBOX;
1922 if (eicr & TXGBE_ICRMISC_LNKSEC)
1923 intr->flags |= TXGBE_FLAG_MACSEC;
1925 if (eicr & TXGBE_ICRMISC_GPIO)
1926 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
1932 * It gets and then prints the link status.
1935 * Pointer to struct rte_eth_dev.
1938 * - On success, zero.
1939 * - On failure, a negative value.
1942 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
1944 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1945 struct rte_eth_link link;
1947 rte_eth_linkstatus_get(dev, &link);
1949 if (link.link_status) {
1950 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1951 (int)(dev->data->port_id),
1952 (unsigned int)link.link_speed,
1953 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1954 "full-duplex" : "half-duplex");
1956 PMD_INIT_LOG(INFO, " Port %d: Link Down",
1957 (int)(dev->data->port_id));
1959 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1960 pci_dev->addr.domain,
1962 pci_dev->addr.devid,
1963 pci_dev->addr.function);
1967 * It executes link_update after knowing an interrupt occurred.
1970 * Pointer to struct rte_eth_dev.
1973 * - On success, zero.
1974 * - On failure, a negative value.
1977 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
1978 struct rte_intr_handle *intr_handle)
1980 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1982 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1984 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
1986 if (intr->flags & TXGBE_FLAG_MAILBOX)
1987 intr->flags &= ~TXGBE_FLAG_MAILBOX;
1989 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
1990 hw->phy.handle_lasi(hw);
1991 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
1994 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
1995 struct rte_eth_link link;
1997 /* get the link status before link update, for later comparison */
1998 rte_eth_linkstatus_get(dev, &link);
2000 txgbe_dev_link_update(dev, 0);
2003 if (!link.link_status)
2004 /* handle it 1 sec later, wait for it to become stable */
2005 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2006 /* likely to go down */
2008 /* handle it 4 sec later, wait for it to become stable */
2009 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2011 txgbe_dev_link_status_print(dev);
2012 if (rte_eal_alarm_set(timeout * 1000,
2013 txgbe_dev_interrupt_delayed_handler,
2015 PMD_DRV_LOG(ERR, "Error setting alarm");
2017 /* remember original mask */
2018 intr->mask_misc_orig = intr->mask_misc;
2019 /* only disable lsc interrupt */
2020 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2024 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2025 txgbe_enable_intr(dev);
2026 rte_intr_enable(intr_handle);
2032 * Interrupt handler which shall be registered as an alarm callback for
2033 * delayed handling of a specific interrupt, waiting for the NIC state to
2034 * become stable. As the NIC interrupt state is not stable for txgbe right
2035 * after the link goes down, it needs to wait 4 seconds to get a stable status.
2038 * Pointer to interrupt handle.
2040 * The address of parameter (struct rte_eth_dev *) registered before.
2046 txgbe_dev_interrupt_delayed_handler(void *param)
2048 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2049 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2050 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2051 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2052 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2055 txgbe_disable_intr(hw);
2057 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2059 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2060 hw->phy.handle_lasi(hw);
2061 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2064 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2065 txgbe_dev_link_update(dev, 0);
2066 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
2067 txgbe_dev_link_status_print(dev);
2068 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2072 if (intr->flags & TXGBE_FLAG_MACSEC) {
2073 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2075 intr->flags &= ~TXGBE_FLAG_MACSEC;
2078 /* restore original mask */
2079 intr->mask_misc = intr->mask_misc_orig;
2080 intr->mask_misc_orig = 0;
2082 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2083 txgbe_enable_intr(dev);
2084 rte_intr_enable(intr_handle);
2088 * Interrupt handler triggered by NIC for handling
2089 * specific interrupt.
2092 * Pointer to interrupt handle.
2094 * The address of parameter (struct rte_eth_dev *) registered before.
2100 txgbe_dev_interrupt_handler(void *param)
2102 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2104 txgbe_dev_interrupt_get_status(dev);
2105 txgbe_dev_interrupt_action(dev, dev->intr_handle);
2109 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2110 uint32_t index, uint32_t pool)
2112 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2113 uint32_t enable_addr = 1;
2115 return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
2120 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2122 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2124 txgbe_clear_rar(hw, index);
2128 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2130 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2132 txgbe_remove_rar(dev, 0);
2133 txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
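
/*
 * Worked example (values made up): with mc_filter_type 0 and an address whose
 * last two bytes are addr_bytes[4] = 0xAB and addr_bytes[5] = 0xCD,
 *
 *	vector = (0xAB >> 4) | (0xCD << 4) = 0x00A | 0xCD0 = 0xCDA
 *
 * txgbe_uc_hash_table_set() below then splits the 12-bit result into a table
 * word index (vector >> 5, here 0x66) and a bit within that word
 * (vector & 0x1F, here 0x1A).
 */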

static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	vector = txgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}

static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, TXGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (on)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);
	return 0;
}
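
/*
 * Illustrative sketch (not part of the driver): both hash-table ops above are
 * driven by the generic ethdev unicast-hash API, e.g.:
 *
 *	// made-up locally administered unicast address
 *	struct rte_ether_addr uc = {
 *		.addr_bytes = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 } };
 *
 *	// accept this one extra unicast address via the hash filter
 *	ret = rte_eth_dev_uc_hash_table_set(port_id, &uc, 1);
 *	// or open/close the whole unicast hash table at once
 *	ret = rte_eth_dev_uc_all_hash_table_set(port_id, 1);
 */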

static int
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= (1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= (1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}
	rte_intr_enable(intr_handle);

	return 0;
}

static int
txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= ~(1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= ~(1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}

	return 0;
}
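
/*
 * Illustrative sketch (not part of the driver): the two ops above back the
 * ethdev Rx interrupt API. Assuming the port was configured with
 * dev_conf.intr_conf.rxq = 1, a polling thread can sleep on an empty queue
 * roughly like this:
 *
 *	struct rte_epoll_event ev;
 *
 *	// bind Rx queue 0 to this thread's epoll instance
 *	rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	// arm the queue interrupt, then wait up to 10 ms for traffic
 *	rte_eth_dev_rx_intr_enable(port_id, 0);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 10);
 *	rte_eth_dev_rx_intr_disable(port_id, 0);
 *	// resume polling rte_eth_rx_burst() here
 */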

/**
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}
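
/*
 * Worked example (illustrative): each TXGBE_IVAR register holds the vectors
 * for two queues, with one 8-bit field per cause. For Rx on queue 5
 * (direction 0), the code above selects register TXGBE_IVAR(5 >> 1) =
 * TXGBE_IVAR(2) and field offset idx = 16 * (5 & 1) + 8 * 0 = 16, i.e. bits
 * 23:16. The matching Tx cause (direction 1) of the same queue lands at
 * idx = 24, i.e. bits 31:24 of the same register.
 */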

/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 * but if msix has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-x mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| TXGBE_ITR_WRDSA);
}
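
/*
 * Illustrative note (inference from the mapping loop above, not driver code):
 * when there are more Rx queues than event fds, the surplus queues share the
 * last data-plane vector because "vec" stops advancing at base + nb_efd - 1.
 * For example, with nb_efd = 4 and 6 Rx queues:
 *
 *	queue 0 -> vec base + 0
 *	queue 1 -> vec base + 1
 *	queue 2 -> vec base + 2
 *	queue 3 -> vec base + 3
 *	queue 4 -> vec base + 3   (shared)
 *	queue 5 -> vec base + 3   (shared)
 */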

static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

static int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct txgbe_hw *hw;
	u8 *mc_addr_list;

	hw = TXGBE_DEV_HW(dev);
	mc_addr_list = (u8 *)mc_addr_set;
	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 txgbe_dev_addr_list_itr, TRUE);
}
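
/*
 * Illustrative sketch (not part of the driver): applications populate the
 * multicast filter through the generic ethdev call, which lands in
 * txgbe_dev_set_mc_addr_list() via the .set_mc_addr_list op, e.g.:
 *
 *	struct rte_ether_addr mc[2] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *	};
 *
 *	ret = rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
 *	// pass an empty list (nb_mc_addr = 0) to clear the filter again
 *
 * The addresses above are the well-known all-hosts and mDNS groups, used here
 * only for illustration.
 */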

static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure = txgbe_dev_configure,
	.dev_infos_get = txgbe_dev_info_get,
	.dev_start = txgbe_dev_start,
	.dev_stop = txgbe_dev_stop,
	.dev_set_link_up = txgbe_dev_set_link_up,
	.dev_set_link_down = txgbe_dev_set_link_down,
	.dev_close = txgbe_dev_close,
	.dev_reset = txgbe_dev_reset,
	.link_update = txgbe_dev_link_update,
	.stats_get = txgbe_dev_stats_get,
	.xstats_get = txgbe_dev_xstats_get,
	.xstats_get_by_id = txgbe_dev_xstats_get_by_id,
	.stats_reset = txgbe_dev_stats_reset,
	.xstats_reset = txgbe_dev_xstats_reset,
	.xstats_get_names = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set,
	.dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
	.rx_queue_start = txgbe_dev_rx_queue_start,
	.rx_queue_stop = txgbe_dev_rx_queue_stop,
	.tx_queue_start = txgbe_dev_tx_queue_start,
	.tx_queue_stop = txgbe_dev_tx_queue_stop,
	.rx_queue_setup = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release = txgbe_dev_rx_queue_release,
	.tx_queue_setup = txgbe_dev_tx_queue_setup,
	.tx_queue_release = txgbe_dev_tx_queue_release,
	.mac_addr_add = txgbe_add_rar,
	.mac_addr_remove = txgbe_remove_rar,
	.mac_addr_set = txgbe_set_default_mac_addr,
	.uc_hash_table_set = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
	.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
	.rxq_info_get = txgbe_rxq_info_get,
	.txq_info_get = txgbe_txq_info_get,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
	RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
	RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
	RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif