/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);
#define TXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define TXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
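/*
 * Worked example of the bitmap math above (a sketch, assuming 32-bit
 * bitmap words, i.e. sizeof(bitmap[0]) * NBBY == 32): for queue q = 35,
 * idx = 35 / 32 = 1 and bit = 35 % 32 = 3, so TXGBE_SET_HWSTRIP() sets
 * bit 3 of bitmap[1] and TXGBE_GET_HWSTRIP() reads it back.
 */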
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};
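/*
 * Note: applications must size descriptor rings within [nb_min, nb_max]
 * and align the count to nb_align. For illustration only, if
 * TXGBE_RXD_ALIGN were 8 (an assumed value), a request for 1018 Rx
 * descriptors would be rejected while 1024 (a multiple of 8) is valid.
 */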
static const struct eth_dev_ops txgbe_eth_dev_ops;
#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),

	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	HW_XSTAT(flow_director_added_filters),
	HW_XSTAT(flow_director_removed_filters),
	HW_XSTAT(flow_director_filter_add_errors),
	HW_XSTAT(flow_director_filter_remove_errors),
	HW_XSTAT(flow_director_matched_filters),
	HW_XSTAT(flow_director_missed_filters),

	HW_XSTAT(rx_fcoe_crc_errors),
	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
	HW_XSTAT(rx_fcoe_dropped),
	HW_XSTAT(rx_fcoe_packets),
	HW_XSTAT(tx_fcoe_packets),
	HW_XSTAT(rx_fcoe_bytes),
	HW_XSTAT(tx_fcoe_bytes),
	HW_XSTAT(rx_fcoe_no_ddp),
	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),

	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
			   sizeof(rte_txgbe_stats_strings[0]))
/* Per-priority statistics */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
	UP_XSTAT(rx_up_packets),
	UP_XSTAT(tx_up_packets),
	UP_XSTAT(rx_up_bytes),
	UP_XSTAT(tx_up_bytes),
	UP_XSTAT(rx_up_drop_packets),

	UP_XSTAT(tx_up_xon_packets),
	UP_XSTAT(rx_up_xon_packets),
	UP_XSTAT(tx_up_xoff_packets),
	UP_XSTAT(rx_up_xoff_packets),
	UP_XSTAT(rx_up_dropped),
	UP_XSTAT(rx_up_mbuf_alloc_errors),
	UP_XSTAT(tx_up_xon2off_packets),
};

#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
			   sizeof(rte_txgbe_up_strings[0]))
/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
			   sizeof(rte_txgbe_qp_strings[0]))
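/*
 * The exported xstats are laid out flat: all hardware-wide counters
 * first, then TXGBE_NB_UP_STATS counters for each of the TXGBE_MAX_UP
 * user priorities, then TXGBE_NB_QP_STATS counters per queue pair. For
 * example, assuming TXGBE_MAX_UP == 8, a port configured with 4 queues
 * exports TXGBE_NB_HW_STATS + TXGBE_NB_UP_STATS * 8 + TXGBE_NB_QP_STATS
 * * 4 entries (see txgbe_xstats_calc_num() below).
 */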
static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return 1;
	default:
		return 0;
	}
}
static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}
static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}
static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}
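/*
 * The mask registers appear to follow the usual set/clear pair convention
 * (an inference from the register names, not a documented fact): writing a
 * bit to IMC(n) clears the interrupt mask and thus enables that interrupt,
 * while writing the same bit to IMS(n) sets the mask and disables it. That
 * is why txgbe_enable_intr() writes IMC and txgbe_disable_intr() writes
 * IMS with the same TXGBE_IMC_MASK value.
 */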
static int
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_stat_mappings *stat_mappings =
		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if (hw->mac.type != txgbe_mac_raptor)
		return -ENOSYS;

	/* each QSM field holds a 4-bit stat index; reject anything wider */
	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
		return -EIO;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= TXGBE_NB_STAT_MAPPING) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsm[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsm[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
	return 0;
}
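/*
 * Worked example of the mapping arithmetic above (using the field sizes
 * implied by the macro names: NB_QMAP_FIELDS_PER_QSM_REG fields of
 * QSM_REG_NB_BITS_PER_QMAP_FIELD bits each, i.e. 4 fields of 8 bits per
 * 32-bit register): mapping Rx queue_id 5 to stat_idx 3 gives
 * n = 5 / 4 = 1 and offset = 5 % 4 = 1, so the value 3 lands in bits
 * 15:8 of rqsm[1].
 */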
static void
txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
{
	int i;
	u8 bwgp;
	struct txgbe_dcb_tc_config *tc;

	UNREFERENCED_PARAMETER(hw);

	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->pfc = txgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
	}
	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities */
	dcb_config->support.capabilities = 0xFF;
}
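/*
 * Example of the bandwidth-group arithmetic above: assuming
 * TXGBE_DCB_TC_MAX == 8, bwgp = 100 / 8 = 12, and the "+ (i & 1)" term
 * alternates 12/13 across the eight TCs so the per-TC percentages sum
 * to exactly 100 (12 + 13 repeated four times).
 */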
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * the lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = TXGBE_MNGSEM_SWPHY |
	       TXGBE_MNGSEM_SWMBX |
	       TXGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	uint16_t csum;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
		16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

	/* Initialize DCB configuration */
	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
	txgbe_dcb_init(hw, dcb_config);

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the bios. A delay of 200ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		msec_delay(400); /* 2 x 200ms, per the note above */
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
		return -EIO;
	} else if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	txgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	txgbe_pf_host_init(eth_dev);

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	return 0;
}
static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}
static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					       &eth_da);
		if (retval)
			return retval;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct txgbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	return 0;
}
static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}
static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};
static int
txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
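/*
 * VFTA indexing example: the 4096 possible VLAN IDs are spread over 128
 * 32-bit table entries, so for vlan_id = 100, vid_idx = (100 >> 5) & 0x7F
 * = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of VLANTBL[3].
 */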
static void
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		txgbe_vlan_hw_strip_enable(dev, queue);
	else
		txgbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			  !(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg |= TXGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			  (rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg &= ~TXGBE_RXCFG_VLAN;
	}
	rxcfg &= ~TXGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		txgbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
		txgbe_dev_rx_queue_start(dev, queue);
	}
}
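/*
 * The stop/start dance above is presumably required because the VLAN
 * strip bit of RXCFG can only be changed safely while the ring is
 * disabled; the base address registers are saved and rewritten for the
 * same reason. This rationale is inferred from the code itself, not
 * from a datasheet.
 */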
static int
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, TXGBE_PORTCTL);

	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR, "Inner type is not supported"
				    " by single VLAN");
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_LSB_MASK,
				TXGBE_TAGTPID_LSB(tpid));
		}
		break;
	case ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16-bits is valid */
			wr32m(hw, TXGBE_EXTAG,
				TXGBE_EXTAG_VLAN_MASK,
				TXGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq)
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_MSB_MASK,
				TXGBE_TAGTPID_MSB(tpid));
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}
static void
txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);
}
static void
txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
	vlnctrl |= TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}
static void
txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
	struct txgbe_rx_queue *rxq;

	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		TXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = PKT_RX_VLAN;
		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}
}
static void
txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl &= ~TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record those setting for HW strip per queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}
static void
txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl |= TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record those setting for HW strip per queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
static void
txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
	ctrl &= ~TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}
static void
txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl |= TXGBE_PORTCTL_VLANEXT;
	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
	    txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ctrl |= TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}
static void
txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct txgbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			txgbe_vlan_strip_queue_set(dev, i, 1);
		else
			txgbe_vlan_strip_queue_set(dev, i, 0);
	}
}
static void
txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct txgbe_rx_queue *rxq;

	if (mask & ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}
static int
txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK)
		txgbe_vlan_hw_strip_config(dev);

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			txgbe_vlan_hw_filter_enable(dev);
		else
			txgbe_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			txgbe_vlan_hw_extend_enable(dev);
		else
			txgbe_vlan_hw_extend_disable(dev);
	}

	return 0;
}
static int
txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	txgbe_vlan_offload_config(dev, mask);

	return 0;
}
static void
txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);

	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, TXGBE_VLANCTL, vlanctrl);
}
static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	return 0;
}
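/*
 * Pool arithmetic example: assuming TXGBE_MAX_RX_QUEUE_NUM == 128 (the
 * hardware maximum implied elsewhere in this file), requesting 4 Rx
 * queues per VF selects ETH_32_POOLS, giving nb_q_per_pool = 128 / 32
 * = 4; requesting 1 or 2 selects ETH_64_POOLS with 128 / 64 = 2 queues
 * per pool.
 */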
static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_RSS:
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" value are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if nothing mq mode configure, use default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					" not supported.");
			return -EINVAL;
		}
		/* check configuration for vmdq+dcb mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}
	}

	return 0;
}
static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
	 * allocation Rx preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}
static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	uint32_t gpie;

	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}
static int
txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
{
	struct txgbe_hw *hw;
	struct txgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint32_t queue_end;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
	if (ret < 0)
		return ret;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (tx_rate > link.link_speed)
		return -EINVAL;

	if (q_msk == 0)
		return 0;

	hw = TXGBE_DEV_HW(dev);
	vfinfo = *(TXGBE_DEV_VFDATA(dev));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	if (queue_end >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (vfinfo) {
		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
			if (vf_idx == vf)
				continue;
			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
				idx++)
				total_rate += vfinfo[vf_idx].tx_rate[idx];
		}
	} else {
		return -EINVAL;
	}

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;
		}
	}

	if (total_rate > dev->data->dev_link.link_speed) {
		/* Reset stored TX rate of the VF if it causes exceed
		 * link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
		return -EINVAL;
	}

	/* Set ARBTXRATE of each queue/pool for vf X */
	for (; queue_idx <= queue_end; queue_idx++) {
		if (0x1 & q_msk)
			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
		q_msk = q_msk >> 1;
	}

	return 0;
}
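/*
 * Example of the total-rate check above: if q_msk = 0xF selects four
 * queues and tx_rate = 1000 (in Mbps, the unit of link_speed), this VF
 * alone would store 4 * 1000 = 4000 Mbps of queue rates; on a 1G link
 * that exceeds dev_link.link_speed, so the stored rates are wiped and
 * -EINVAL is returned.
 */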
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = 0;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* TXGBE devices don't support:
	 * - half duplex (checked afterwards for valid speeds)
	 * - fixed speed: TODO implement
	 */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
			"Invalid link_speeds for port %u, fix speed not supported",
			dev->data->port_id);
		return -EINVAL;
	}

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	txgbe_stop_hw(hw);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = txgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	txgbe_pf_host_configure(dev);

	txgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for sleep until rx interrupt */
	txgbe_configure_msix(dev);

	/* initialize transmission unit */
	txgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	err = txgbe_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		txgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	txgbe_configure_pb(dev);
	txgbe_configure_port(dev);
	txgbe_configure_dcb(dev);

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					txgbe_set_vf_rate_limit(dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	err = txgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			 ETH_LINK_SPEED_10G;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = (TXGBE_LINK_SPEED_100M_FULL |
			 TXGBE_LINK_SPEED_1GB_FULL |
			 TXGBE_LINK_SPEED_10GB_FULL);
	} else {
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= TXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= TXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= TXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= TXGBE_LINK_SPEED_100M_FULL;
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
		txgbe_dev_macsec_interrupt_setup(dev);
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     txgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		txgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	txgbe_enable_intr(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	txgbe_dev_link_update(dev, 0);

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	txgbe_dev_clear_queues(dev);
	return -EIO;
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
txgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	txgbe_disable_intr(hw);

	/* reset the NIC */
	txgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	txgbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
	}

	txgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   txgbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->rss_reta_updated = 0;
	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}
/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}
/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}
/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	txgbe_pf_reset_hw(hw);

	ret = txgbe_dev_stop(dev);

	txgbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				txgbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));

	/* cancel the delay handler before remove dev */
	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

	/* uninitialize PF if max_vfs not zero */
	txgbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}
/*
 * Reset PF device.
 */
static int
txgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begin to reset PF port, it should notify all
	 * its VF to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to txgbe PF, it is rather complex.
	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_txgbe_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_txgbe_dev_init(dev, NULL);

	return ret;
}
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
	{                                                       \
		uint32_t current_counter = rd32(hw, reg);       \
		if (current_counter < last_counter)             \
			current_counter += 0x100000000LL;       \
		if (!hw->offset_loaded)                         \
			last_counter = current_counter;         \
		counter = current_counter - last_counter;       \
		counter &= 0xFFFFFFFFLL;                        \
	}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{                                                                \
		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
		uint64_t current_counter = (current_counter_msb << 32) | \
			current_counter_lsb;                             \
		if (current_counter < last_counter)                      \
			current_counter += 0x1000000000LL;               \
		if (!hw->offset_loaded)                                  \
			last_counter = current_counter;                  \
		counter = current_counter - last_counter;                \
		counter &= 0xFFFFFFFFFLL;                                \
	}
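/*
 * Wrap-handling example for the 36-bit variant: if the previously read
 * value was 0xF_FFFF_FF00 and the hardware counter has since wrapped to
 * 0x0000_0100, current < last, so 2^36 (0x10_0000_0000) is added before
 * subtracting, yielding the true delta of 0x200.
 */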
void
txgbe_read_stats_registers(struct txgbe_hw *hw,
			   struct txgbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
	}

	/* PB Stats */
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->up[i].rx_up_xon_packets +=
				rd32(hw, TXGBE_PBRXUPXON(i));
		hw_stats->up[i].rx_up_xoff_packets +=
				rd32(hw, TXGBE_PBRXUPXOFF(i));
		hw_stats->up[i].tx_up_xon_packets +=
				rd32(hw, TXGBE_PBTXUPXON(i));
		hw_stats->up[i].tx_up_xoff_packets +=
				rd32(hw, TXGBE_PBTXUPXOFF(i));
		hw_stats->up[i].tx_up_xon2off_packets +=
				rd32(hw, TXGBE_PBTXUPOFF(i));
		hw_stats->up[i].rx_up_dropped +=
				rd32(hw, TXGBE_PBRXMISS(i));
	}
	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);

	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);

	/* FCoE Stats */
	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);

	/* Flow Director Stats */
	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
	hw_stats->flow_director_added_filters +=
			TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_removed_filters +=
			TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_filter_add_errors +=
			TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
	hw_stats->flow_director_filter_remove_errors +=
			TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, TXGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, TXGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, TXGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, TXGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, TXGBE_LSECRX_BADPKT(i));
	}
	hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, TXGBE_LSECRX_INVSAPKT);
	hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, TXGBE_LSECRX_BADSAPKT);

	hw_stats->rx_total_missed_packets = 0;
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->rx_total_missed_packets +=
				hw_stats->up[i].rx_up_dropped;
	}
}
static int
txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_stat_mappings *stat_mappings =
			TXGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	txgbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < TXGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed = hw_stats->rx_total_missed_packets;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rx_oversize_errors +
			 hw_stats->rx_drop_packets +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors +
			 hw_stats->rx_fcoe_crc_errors +
			 hw_stats->rx_fcoe_mbuf_allocation_errors;

	/* Tx Errors */
	stats->oerrors = 0;
	return 0;
}
static int
txgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	txgbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}
/* This function calculates the number of xstats based on the current config */
static unsigned
txgbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return TXGBE_NB_HW_STATS +
	       TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
	       TXGBE_NB_QP_STATS * nb_queues;
}
static inline int
txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from txgbe_hw_stats */
	if (id < TXGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_txgbe_stats_strings[id].name);
		return 0;
	}
	id -= TXGBE_NB_HW_STATS;

	/* Priority Stats */
	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
		nb = id / TXGBE_NB_UP_STATS;
		st = id % TXGBE_NB_UP_STATS;
		snprintf(name, size, "[p%u]%s", nb,
			rte_txgbe_up_strings[st].name);
		return 0;
	}
	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;

	/* Queue Stats */
	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
		nb = id / TXGBE_NB_QP_STATS;
		st = id % TXGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_txgbe_qp_strings[st].name);
		return 0;
	}
	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;

	return -(int)(id + 1);
}
static inline int
txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from txgbe_hw_stats */
	if (id < TXGBE_NB_HW_STATS) {
		*offset = rte_txgbe_stats_strings[id].offset;
		return 0;
	}
	id -= TXGBE_NB_HW_STATS;

	/* Priority Stats */
	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
		nb = id / TXGBE_NB_UP_STATS;
		st = id % TXGBE_NB_UP_STATS;
		*offset = rte_txgbe_up_strings[st].offset +
			nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
		return 0;
	}
	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;

	/* Queue Stats */
	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
		nb = id / TXGBE_NB_QP_STATS;
		st = id % TXGBE_NB_QP_STATS;
		*offset = rte_txgbe_qp_strings[st].offset +
			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}
	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;

	return -(int)(id + 1);
}
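/*
 * Id decoding example: ids index the flat layout [hw stats][per-priority
 * stats][per-queue stats]. Assuming rte_txgbe_up_strings holds 12 entries
 * (its apparent size above), an id of TXGBE_NB_HW_STATS + 25 falls in the
 * priority block with nb = 25 / 12 = 2 and st = 25 % 12 = 1, i.e. the
 * second counter of priority 2, reported as "[p2]tx_up_packets".
 */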
static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = txgbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from txgbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (txgbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}
static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return txgbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}
static int
txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int limit)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	unsigned int i, count;

	txgbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = txgbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, txgbe_xstats_calc_num(dev));

	/* Extended stats from txgbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (txgbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}
static int
txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
		      unsigned int limit)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	unsigned int i, count;

	txgbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, values is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = txgbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, txgbe_xstats_calc_num(dev));

	/* Extended stats from txgbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (txgbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}
static int
txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int limit)
{
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	unsigned int i;
	uint32_t offset;

	if (ids == NULL)
		return txgbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		if (txgbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}
	return i;
}
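/*
 * Minimal application-side sketch of how the hooks above are reached
 * through the ethdev API (assumes port_id is a bound txgbe port; this is
 * not driver code):
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);	// query count only
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n) {
 *		// xs[i].id matches the ids accepted by
 *		// txgbe_dev_xstats_get_by_id() above
 *	}
 *	free(xs);
 */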
static int
txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}
static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}
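/*
 * The preferred ring/burst sizes above are advisory: they surface through
 * rte_eth_dev_info_get() and the ethdev layer falls back to them when the
 * application passes 0 for the corresponding setup parameter.  Sketch (not
 * driver code):
 *
 *	struct rte_eth_dev_info info;
 *	rte_eth_dev_info_get(port_id, &info);
 *	// info.default_rxportconf.ring_size == 256 on txgbe
 */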
static const uint32_t *
txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
		return txgbe_get_supported_ptypes();

	return NULL;
}
void
txgbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		hw->mac.get_link_capabilities(hw, &speed, &autoneg);

	hw->mac.setup_link(hw, speed, true);

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}
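/*
 * Note: the alarm above is armed from txgbe_dev_link_update_share() below
 * when a fiber link is reported down, so link setup is retried outside the
 * interrupt/poll path; TXGBE_FLAG_NEED_LINK_CONFIG keeps the link reported
 * as down until the retry completes.
 */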
/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_link link;
	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	bool link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	hw->mac.get_link_status = true;

	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		if (hw->phy.media_type == txgbe_media_type_fiber) {
			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				txgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbe_dev_link_status_print(dev);
	if (on)
		intr->mask_misc |= TXGBE_ICRMISC_LSC;
	else
		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

	return 0;
}
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask[0] |= TXGBE_ICR_MASK;
	intr->mask[1] |= TXGBE_ICR_MASK;

	return 0;
}
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

	return 0;
}
/**
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}
/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			(int)(dev->data->port_id),
			(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		pci_dev->addr.domain,
		pci_dev->addr.bus,
		pci_dev->addr.devid,
		pci_dev->addr.function);
}
/**
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX) {
		txgbe_pf_mbx_process(dev);
		intr->flags &= ~TXGBE_FLAG_MAILBOX;
	}

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* likely to up */
		if (!link.link_status)
			/* handle it 1 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}
/**
 * Interrupt handler which shall be registered as alarm callback for delayed
 * handling of a specific interrupt, to wait for the NIC state to become
 * stable. As the NIC interrupt state is not stable for txgbe right after
 * link goes down, it needs to wait 4 seconds to get a stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t eicr;

	txgbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	if (eicr & TXGBE_ICRMISC_VFMBX)
		txgbe_pf_mbx_process(dev);

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		txgbe_dev_link_update(dev, 0);
		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
		txgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & TXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~TXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbe_dev_interrupt_get_status(dev);
	txgbe_dev_interrupt_action(dev, dev->intr_handle);
}
static int
txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	if (!txgbe_rss_update_sp(hw->mac.type)) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1)) {
				reta &= ~(MS32(8 * j, 0xFF));
				reta |= LS32(reta_conf[idx].reta[shift + j],
						8 * j, 0xFF);
			}
		}
		wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
	}
	adapter->rss_reta_updated = 1;

	return 0;
}
static int
txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1))
				reta_conf[idx].reta[shift + j] =
					(uint16_t)RS32(reta, 8 * j, 0xFF);
		}
	}

	return 0;
}
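/*
 * Each 32-bit RSSTBL word packs four 8-bit queue indices, which is why both
 * RETA loops above step by 4 and shift by 8 * j.  Worked example (values
 * assumed for illustration): to set RETA entries 4..7 to queues 2,3,2,3,
 * reta_conf[0].mask must have bits 4..7 set, and the second RSSTBL word
 * (i >> 2 == 1) is rewritten as 0x03020302.
 */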
static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
	      uint32_t index, uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t enable_addr = 1;

	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	txgbe_remove_rar(dev, 0);
	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}
static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
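/*
 * Worked example for txgbe_uta_vector() (values assumed for illustration):
 * with mc_filter_type == 0 and a MAC ending in ...:AB:CD, addr_bytes[4] is
 * 0xAB and addr_bytes[5] is 0xCD, so vector = (0xAB >> 4) | (0xCD << 4)
 * = 0xCDA.  txgbe_uc_hash_table_set() below then touches UTA word
 * 0xCDA >> 5 = 102, bit 0xCDA & 0x1F = 26.
 */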
static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;

	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	vector = txgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
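/*
 * uta_shadow mirrors the UCADDRTBL registers so the driver can test the
 * current filter state without an extra register read, and uta_in_use
 * counts set bits so the unicast hash filter is only enabled in PSRCTL
 * while at least one entry is active.
 */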
static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, TXGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (on)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
static uint32_t
txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
{
	uint32_t new_val = orig_val;

	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
		new_val |= TXGBE_POOLETHCTL_UTA;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
		new_val |= TXGBE_POOLETHCTL_MCHA;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
		new_val |= TXGBE_POOLETHCTL_UCHA;
	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
		new_val |= TXGBE_POOLETHCTL_BCA;
	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
		new_val |= TXGBE_POOLETHCTL_MCP;

	return new_val;
}
static int
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= (1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= (1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}
	rte_intr_enable(intr_handle);

	return 0;
}
static int
txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= ~(1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= ~(1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}

	return 0;
}
/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for lost ICR */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}
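/*
 * IVAR layout example (illustrative): each IVAR word covers two queues,
 * 8 bits per cause.  For Rx queue 5 (direction 0),
 * idx = 16 * (5 & 1) + 8 * 0 = 16, so the vector is written to bits
 * [23:16] of IVAR(2); the matching Tx cause for queue 5 lands in [31:24].
 */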
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* Won't configure the MSI-X register if no mapping is done
	 * between intr vector and event fd,
	 * but if MSI-X has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
		TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
		| TXGBE_ITR_WRDSA);
}
int
txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
			   uint16_t queue_idx, uint16_t tx_rate)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t bcnrc_val;

	if (queue_idx >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (tx_rate != 0) {
		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
	} else {
		bcnrc_val = 0;
	}

	/*
	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(hw, TXGBE_ARBTXMMW, 0x14);

	/* Set ARBTXRATE of queue X */
	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);

	return 0;
}
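/*
 * Reached via the ethdev API; a minimal sketch, assuming port_id and the
 * queue index are valid (not driver code):
 *
 *	// cap Tx queue 0 to 1000 Mbps
 *	rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */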
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct txgbe_hw *hw;
	u8 *mc_addr_list;

	hw = TXGBE_DEV_HW(dev);
	mc_addr_list = (u8 *)mc_addr_set;
	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 txgbe_dev_addr_list_itr, TRUE);
}
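/*
 * A minimal sketch of the corresponding application call (not driver
 * code), assuming mc_list holds nb valid multicast addresses:
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc_list, nb);
 */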
bool
txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
{
	switch (mac_type) {
	case txgbe_mac_raptor:
		return 1;
	default:
		return 0;
	}
}
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure              = txgbe_dev_configure,
	.dev_infos_get              = txgbe_dev_info_get,
	.dev_start                  = txgbe_dev_start,
	.dev_stop                   = txgbe_dev_stop,
	.dev_set_link_up            = txgbe_dev_set_link_up,
	.dev_set_link_down          = txgbe_dev_set_link_down,
	.dev_close                  = txgbe_dev_close,
	.dev_reset                  = txgbe_dev_reset,
	.link_update                = txgbe_dev_link_update,
	.stats_get                  = txgbe_dev_stats_get,
	.xstats_get                 = txgbe_dev_xstats_get,
	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
	.stats_reset                = txgbe_dev_stats_reset,
	.xstats_reset               = txgbe_dev_xstats_reset,
	.xstats_get_names           = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
	.vlan_filter_set            = txgbe_vlan_filter_set,
	.vlan_tpid_set              = txgbe_vlan_tpid_set,
	.vlan_offload_set           = txgbe_vlan_offload_set,
	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
	.rx_queue_start             = txgbe_dev_rx_queue_start,
	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
	.tx_queue_start             = txgbe_dev_tx_queue_start,
	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release           = txgbe_dev_rx_queue_release,
	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
	.tx_queue_release           = txgbe_dev_tx_queue_release,
	.mac_addr_add               = txgbe_add_rar,
	.mac_addr_remove            = txgbe_remove_rar,
	.mac_addr_set               = txgbe_set_default_mac_addr,
	.uc_hash_table_set          = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
	.reta_update                = txgbe_dev_rss_reta_update,
	.reta_query                 = txgbe_dev_rss_reta_query,
	.rss_hash_update            = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
	.rxq_info_get               = txgbe_rxq_info_get,
	.txq_info_get               = txgbe_txq_info_get,
};
RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);