1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_interrupts.h>
14 #include <rte_debug.h>
16 #include <rte_memory.h>
18 #include <rte_alarm.h>
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
25 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
26 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
27 static int txgbe_dev_close(struct rte_eth_dev *dev);
28 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
29 int wait_to_complete);
30 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
31 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
32 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
35 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
36 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
37 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
38 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
39 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
40 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
41 struct rte_intr_handle *handle);
42 static void txgbe_dev_interrupt_handler(void *param);
43 static void txgbe_dev_interrupt_delayed_handler(void *param);
44 static void txgbe_configure_msix(struct rte_eth_dev *dev);
46 #define TXGBE_SET_HWSTRIP(h, q) do {\
47 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
48 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
49 (h)->bitmap[idx] |= 1 << bit;\
52 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
53 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
54 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
55 (h)->bitmap[idx] &= ~(1 << bit);\
58 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
59 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
60 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
61 (r) = (h)->bitmap[idx] >> bit & 1;\
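/*
 * Worked example (illustrative, assuming h->bitmap[] holds 32-bit words
 * and NBBY is 8 bits per byte): queue 37 maps to idx = 37 / 32 = 1 and
 * bit = 37 % 32 = 5, so TXGBE_SET_HWSTRIP(h, 37) sets bit 5 of
 * h->bitmap[1], and TXGBE_GET_HWSTRIP(h, 37, r) reads it back into r.
 */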
65 * The set of PCI devices this driver supports
67 static const struct rte_pci_id pci_id_txgbe_map[] = {
68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
70 { .vendor_id = 0, /* sentinel */ },
73 static const struct rte_eth_desc_lim rx_desc_lim = {
74 .nb_max = TXGBE_RING_DESC_MAX,
75 .nb_min = TXGBE_RING_DESC_MIN,
76 .nb_align = TXGBE_RXD_ALIGN,
79 static const struct rte_eth_desc_lim tx_desc_lim = {
80 .nb_max = TXGBE_RING_DESC_MAX,
81 .nb_min = TXGBE_RING_DESC_MIN,
82 .nb_align = TXGBE_TXD_ALIGN,
83 .nb_seg_max = TXGBE_TX_MAX_SEG,
84 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
87 static const struct eth_dev_ops txgbe_eth_dev_ops;
89 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
90 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
91 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
93 HW_XSTAT(mng_bmc2host_packets),
94 HW_XSTAT(mng_host2bmc_packets),
100 HW_XSTAT(rx_total_bytes),
101 HW_XSTAT(rx_total_packets),
102 HW_XSTAT(tx_total_packets),
103 HW_XSTAT(rx_total_missed_packets),
104 HW_XSTAT(rx_broadcast_packets),
105 HW_XSTAT(rx_multicast_packets),
106 HW_XSTAT(rx_management_packets),
107 HW_XSTAT(tx_management_packets),
108 HW_XSTAT(rx_management_dropped),
111 HW_XSTAT(rx_crc_errors),
112 HW_XSTAT(rx_illegal_byte_errors),
113 HW_XSTAT(rx_error_bytes),
114 HW_XSTAT(rx_mac_short_packet_dropped),
115 HW_XSTAT(rx_length_errors),
116 HW_XSTAT(rx_undersize_errors),
117 HW_XSTAT(rx_fragment_errors),
118 HW_XSTAT(rx_oversize_errors),
119 HW_XSTAT(rx_jabber_errors),
120 HW_XSTAT(rx_l3_l4_xsum_error),
121 HW_XSTAT(mac_local_errors),
122 HW_XSTAT(mac_remote_errors),
125 HW_XSTAT(flow_director_added_filters),
126 HW_XSTAT(flow_director_removed_filters),
127 HW_XSTAT(flow_director_filter_add_errors),
128 HW_XSTAT(flow_director_filter_remove_errors),
129 HW_XSTAT(flow_director_matched_filters),
130 HW_XSTAT(flow_director_missed_filters),
133 HW_XSTAT(rx_fcoe_crc_errors),
134 HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
135 HW_XSTAT(rx_fcoe_dropped),
136 HW_XSTAT(rx_fcoe_packets),
137 HW_XSTAT(tx_fcoe_packets),
138 HW_XSTAT(rx_fcoe_bytes),
139 HW_XSTAT(tx_fcoe_bytes),
140 HW_XSTAT(rx_fcoe_no_ddp),
141 HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
144 HW_XSTAT(tx_macsec_pkts_untagged),
145 HW_XSTAT(tx_macsec_pkts_encrypted),
146 HW_XSTAT(tx_macsec_pkts_protected),
147 HW_XSTAT(tx_macsec_octets_encrypted),
148 HW_XSTAT(tx_macsec_octets_protected),
149 HW_XSTAT(rx_macsec_pkts_untagged),
150 HW_XSTAT(rx_macsec_pkts_badtag),
151 HW_XSTAT(rx_macsec_pkts_nosci),
152 HW_XSTAT(rx_macsec_pkts_unknownsci),
153 HW_XSTAT(rx_macsec_octets_decrypted),
154 HW_XSTAT(rx_macsec_octets_validated),
155 HW_XSTAT(rx_macsec_sc_pkts_unchecked),
156 HW_XSTAT(rx_macsec_sc_pkts_delayed),
157 HW_XSTAT(rx_macsec_sc_pkts_late),
158 HW_XSTAT(rx_macsec_sa_pkts_ok),
159 HW_XSTAT(rx_macsec_sa_pkts_invalid),
160 HW_XSTAT(rx_macsec_sa_pkts_notvalid),
161 HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
162 HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
165 HW_XSTAT(rx_size_64_packets),
166 HW_XSTAT(rx_size_65_to_127_packets),
167 HW_XSTAT(rx_size_128_to_255_packets),
168 HW_XSTAT(rx_size_256_to_511_packets),
169 HW_XSTAT(rx_size_512_to_1023_packets),
170 HW_XSTAT(rx_size_1024_to_max_packets),
171 HW_XSTAT(tx_size_64_packets),
172 HW_XSTAT(tx_size_65_to_127_packets),
173 HW_XSTAT(tx_size_128_to_255_packets),
174 HW_XSTAT(tx_size_256_to_511_packets),
175 HW_XSTAT(tx_size_512_to_1023_packets),
176 HW_XSTAT(tx_size_1024_to_max_packets),
179 HW_XSTAT(tx_xon_packets),
180 HW_XSTAT(rx_xon_packets),
181 HW_XSTAT(tx_xoff_packets),
182 HW_XSTAT(rx_xoff_packets),
184 HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
185 HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
186 HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
187 HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
190 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
191 sizeof(rte_txgbe_stats_strings[0]))
193 /* Per-priority statistics */
194 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
195 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
196 UP_XSTAT(rx_up_packets),
197 UP_XSTAT(tx_up_packets),
198 UP_XSTAT(rx_up_bytes),
199 UP_XSTAT(tx_up_bytes),
200 UP_XSTAT(rx_up_drop_packets),
202 UP_XSTAT(tx_up_xon_packets),
203 UP_XSTAT(rx_up_xon_packets),
204 UP_XSTAT(tx_up_xoff_packets),
205 UP_XSTAT(rx_up_xoff_packets),
206 UP_XSTAT(rx_up_dropped),
207 UP_XSTAT(rx_up_mbuf_alloc_errors),
208 UP_XSTAT(tx_up_xon2off_packets),
211 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
212 sizeof(rte_txgbe_up_strings[0]))
214 /* Per-queue statistics */
215 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
216 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
217 QP_XSTAT(rx_qp_packets),
218 QP_XSTAT(tx_qp_packets),
219 QP_XSTAT(rx_qp_bytes),
220 QP_XSTAT(tx_qp_bytes),
221 QP_XSTAT(rx_qp_mc_packets),
224 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
225 sizeof(rte_txgbe_qp_strings[0]))
228 txgbe_is_sfp(struct txgbe_hw *hw)
230 switch (hw->phy.type) {
231 case txgbe_phy_sfp_avago:
232 case txgbe_phy_sfp_ftl:
233 case txgbe_phy_sfp_intel:
234 case txgbe_phy_sfp_unknown:
235 case txgbe_phy_sfp_tyco_passive:
236 case txgbe_phy_sfp_unknown_passive:
243 static inline int32_t
244 txgbe_pf_reset_hw(struct txgbe_hw *hw)
249 status = hw->mac.reset_hw(hw);
251 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
252 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
253 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
254 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
257 if (status == TXGBE_ERR_SFP_NOT_PRESENT)
263 txgbe_enable_intr(struct rte_eth_dev *dev)
265 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
266 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
268 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
269 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
270 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
275 txgbe_disable_intr(struct txgbe_hw *hw)
277 PMD_INIT_FUNC_TRACE();
279 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
280 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
281 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
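/*
 * IMS/IMC appear to follow the usual set/clear convention: writing
 * TXGBE_IMS(i) sets mask bits (interrupts off), while
 * txgbe_enable_intr() above writes TXGBE_IMC(i) to clear the same
 * bits (interrupts on); IENMISC gates the miscellaneous vector.
 */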
286 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
291 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
292 struct txgbe_stat_mappings *stat_mappings =
293 TXGBE_DEV_STAT_MAPPINGS(eth_dev);
294 uint32_t qsmr_mask = 0;
295 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
299 if (hw->mac.type != txgbe_mac_raptor)
302 if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
305 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
306 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
309 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
310 if (n >= TXGBE_NB_STAT_MAPPING) {
311 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
314 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
316 /* Now clear any previous stat_idx set */
317 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
319 stat_mappings->tqsm[n] &= ~clearing_mask;
321 stat_mappings->rqsm[n] &= ~clearing_mask;
323 q_map = (uint32_t)stat_idx;
324 q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
325 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
327 stat_mappings->tqsm[n] |= qsmr_mask;
329 stat_mappings->rqsm[n] |= qsmr_mask;
331 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
332 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
334 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
335 is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
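/*
 * Worked example (illustrative, assuming 8-bit fields and 4 fields per
 * 32-bit QSM register): mapping queue_id 5 to stat_idx 9 gives
 * n = 5 / 4 = 1 and offset = 5 % 4 = 1, so bits [15:8] of
 * [rt]qsm[1] are cleared and then set to 9.
 */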
340 * Ensure that all locks are released before first NVM or PHY access
343 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
348 * These ones are more tricky since they are common to all ports; but
349 * swfw_sync retries last long enough (1s) to be almost sure that if
350 * lock cannot be taken, it is due to an improper lock of the semaphore.
353 mask = TXGBE_MNGSEM_SWPHY |
355 TXGBE_MNGSEM_SWFLASH;
356 if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
357 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
359 hw->mac.release_swfw_sync(hw, mask);
363 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
365 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
366 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
367 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
368 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
369 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
370 const struct rte_memzone *mz;
374 PMD_INIT_FUNC_TRACE();
376 eth_dev->dev_ops = &txgbe_eth_dev_ops;
377 eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
378 eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
379 eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
382 * For secondary processes, we don't initialise any further as primary
383 * has already done this work. Only check we don't need a different
384 * RX and TX function.
386 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
387 struct txgbe_tx_queue *txq;
388 /* TX queue function in primary, set by last queue initialized.
389 * Tx queue may not have been initialized by the primary process.
391 if (eth_dev->data->tx_queues) {
392 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
393 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
394 txgbe_set_tx_function(eth_dev, txq);
396 /* Use default TX function if we get here */
397 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
398 "Using default TX function.");
401 txgbe_set_rx_function(eth_dev);
406 rte_eth_copy_pci_info(eth_dev, pci_dev);
408 /* Vendor and Device ID need to be set before init of shared code */
409 hw->device_id = pci_dev->id.device_id;
410 hw->vendor_id = pci_dev->id.vendor_id;
411 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
412 hw->allow_unsupported_sfp = 1;
414 /* Reserve memory for interrupt status block */
415 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
416 16, TXGBE_ALIGN, SOCKET_ID_ANY);
420 hw->isb_dma = TMZ_PADDR(mz);
421 hw->isb_mem = TMZ_VADDR(mz);
423 /* Initialize the shared code (base driver) */
424 err = txgbe_init_shared_code(hw);
426 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
430 /* Unlock any pending hardware semaphore */
431 txgbe_swfw_lock_reset(hw);
433 err = hw->rom.init_params(hw);
435 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
439 /* Make sure we have a good EEPROM before we read from it */
440 err = hw->rom.validate_checksum(hw, &csum);
442 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
446 err = hw->mac.init_hw(hw);
449 * Devices with copper phys will fail to initialise if txgbe_init_hw()
450 * is called too soon after the kernel driver unbinding/binding occurs.
451 * The failure occurs in txgbe_identify_phy() for all devices,
452 * but for non-copper devices, txgbe_identify_sfp_module() is
453 * also called. See txgbe_identify_phy(). The reason for the
454 * failure is not known, and only occurs when virtualisation features
455 * are disabled in the BIOS. A delay of 200ms was found to be enough by
456 * trial-and-error, and is doubled to be safe.
458 if (err && hw->phy.media_type == txgbe_media_type_copper) {
460 err = hw->mac.init_hw(hw);
463 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
466 if (err == TXGBE_ERR_EEPROM_VERSION) {
467 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
468 "LOM. Please be aware there may be issues associated "
469 "with your hardware.");
470 PMD_INIT_LOG(ERR, "If you are experiencing problems "
471 "please contact your hardware representative "
472 "who provided you with this hardware.");
473 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
474 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
477 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
481 /* Reset the hw statistics */
482 txgbe_dev_stats_reset(eth_dev);
484 /* disable interrupt */
485 txgbe_disable_intr(hw);
487 /* Allocate memory for storing MAC addresses */
488 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
489 hw->mac.num_rar_entries, 0);
490 if (eth_dev->data->mac_addrs == NULL) {
492 "Failed to allocate %u bytes needed to store "
494 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
498 /* Copy the permanent MAC address */
499 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
500 &eth_dev->data->mac_addrs[0]);
502 /* Allocate memory for storing hash filter MAC addresses */
503 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
504 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
505 if (eth_dev->data->hash_mac_addrs == NULL) {
507 "Failed to allocate %d bytes needed to store MAC addresses",
508 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
512 /* initialize the vfta */
513 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
515 /* initialize the hw strip bitmap */
516 memset(hwstrip, 0, sizeof(*hwstrip));
518 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
519 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
520 (int)hw->mac.type, (int)hw->phy.type,
521 (int)hw->phy.sfp_type);
523 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
524 (int)hw->mac.type, (int)hw->phy.type);
526 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
527 eth_dev->data->port_id, pci_dev->id.vendor_id,
528 pci_dev->id.device_id);
530 rte_intr_callback_register(intr_handle,
531 txgbe_dev_interrupt_handler, eth_dev);
533 /* enable uio/vfio intr/eventfd mapping */
534 rte_intr_enable(intr_handle);
536 /* enable supported interrupts */
537 txgbe_enable_intr(eth_dev);
543 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
545 PMD_INIT_FUNC_TRACE();
547 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
550 txgbe_dev_close(eth_dev);
556 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
557 struct rte_pci_device *pci_dev)
559 struct rte_eth_dev *pf_ethdev;
560 struct rte_eth_devargs eth_da;
563 if (pci_dev->device.devargs) {
564 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
569 memset(&eth_da, 0, sizeof(eth_da));
572 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
573 sizeof(struct txgbe_adapter),
574 eth_dev_pci_specific_init, pci_dev,
575 eth_txgbe_dev_init, NULL);
577 if (retval || eth_da.nb_representor_ports < 1)
580 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
581 if (pf_ethdev == NULL)
587 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
589 struct rte_eth_dev *ethdev;
591 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
595 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
598 static struct rte_pci_driver rte_txgbe_pmd = {
599 .id_table = pci_id_txgbe_map,
600 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
601 RTE_PCI_DRV_INTR_LSC,
602 .probe = eth_txgbe_pci_probe,
603 .remove = eth_txgbe_pci_remove,
607 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
609 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
610 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
615 vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
616 vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
617 vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
622 wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
624 /* update local VFTA copy */
625 shadow_vfta->vfta[vid_idx] = vfta;
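/*
 * Worked example (illustrative): for vlan_id 100, vid_idx =
 * (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * so VLAN 100 toggles bit 4 of VLANTBL[3] and of the shadow copy
 * used to restore the table after a reset.
 */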
631 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
633 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
634 struct txgbe_rx_queue *rxq;
636 uint32_t rxcfg, rxbal, rxbah;
639 txgbe_vlan_hw_strip_enable(dev, queue);
641 txgbe_vlan_hw_strip_disable(dev, queue);
643 rxq = dev->data->rx_queues[queue];
644 rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
645 rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
646 rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
647 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
648 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
649 !(rxcfg & TXGBE_RXCFG_VLAN);
650 rxcfg |= TXGBE_RXCFG_VLAN;
652 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
653 (rxcfg & TXGBE_RXCFG_VLAN);
654 rxcfg &= ~TXGBE_RXCFG_VLAN;
656 rxcfg &= ~TXGBE_RXCFG_ENA;
659 /* set vlan strip for ring */
660 txgbe_dev_rx_queue_stop(dev, queue);
661 wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
662 wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
663 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
664 txgbe_dev_rx_queue_start(dev, queue);
669 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
670 enum rte_vlan_type vlan_type,
673 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
675 uint32_t portctrl, vlan_ext, qinq;
677 portctrl = rd32(hw, TXGBE_PORTCTL);
679 vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
680 qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
682 case ETH_VLAN_TYPE_INNER:
684 wr32m(hw, TXGBE_VLANCTL,
685 TXGBE_VLANCTL_TPID_MASK,
686 TXGBE_VLANCTL_TPID(tpid));
687 wr32m(hw, TXGBE_DMATXCTRL,
688 TXGBE_DMATXCTRL_TPID_MASK,
689 TXGBE_DMATXCTRL_TPID(tpid));
692 PMD_DRV_LOG(ERR, "Inner type is not supported"
697 wr32m(hw, TXGBE_TAGTPID(0),
698 TXGBE_TAGTPID_LSB_MASK,
699 TXGBE_TAGTPID_LSB(tpid));
702 case ETH_VLAN_TYPE_OUTER:
704 /* Only the high 16 bits are valid */
705 wr32m(hw, TXGBE_EXTAG,
706 TXGBE_EXTAG_VLAN_MASK,
707 TXGBE_EXTAG_VLAN(tpid));
709 wr32m(hw, TXGBE_VLANCTL,
710 TXGBE_VLANCTL_TPID_MASK,
711 TXGBE_VLANCTL_TPID(tpid));
712 wr32m(hw, TXGBE_DMATXCTRL,
713 TXGBE_DMATXCTRL_TPID_MASK,
714 TXGBE_DMATXCTRL_TPID(tpid));
718 wr32m(hw, TXGBE_TAGTPID(0),
719 TXGBE_TAGTPID_MSB_MASK,
720 TXGBE_TAGTPID_MSB(tpid));
724 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
732 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
734 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
737 PMD_INIT_FUNC_TRACE();
739 /* Filter Table Disable */
740 vlnctrl = rd32(hw, TXGBE_VLANCTL);
741 vlnctrl &= ~TXGBE_VLANCTL_VFE;
742 wr32(hw, TXGBE_VLANCTL, vlnctrl);
746 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
748 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
749 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
753 PMD_INIT_FUNC_TRACE();
755 /* Filter Table Enable */
756 vlnctrl = rd32(hw, TXGBE_VLANCTL);
757 vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
758 vlnctrl |= TXGBE_VLANCTL_VFE;
759 wr32(hw, TXGBE_VLANCTL, vlnctrl);
761 /* write whatever is in local vfta copy */
762 for (i = 0; i < TXGBE_VFTA_SIZE; i++)
763 wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
767 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
769 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
770 struct txgbe_rx_queue *rxq;
772 if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
776 TXGBE_SET_HWSTRIP(hwstrip, queue);
778 TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
780 if (queue >= dev->data->nb_rx_queues)
783 rxq = dev->data->rx_queues[queue];
786 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
787 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
789 rxq->vlan_flags = PKT_RX_VLAN;
790 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
795 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
797 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
800 PMD_INIT_FUNC_TRACE();
802 ctrl = rd32(hw, TXGBE_RXCFG(queue));
803 ctrl &= ~TXGBE_RXCFG_VLAN;
804 wr32(hw, TXGBE_RXCFG(queue), ctrl);
806 /* record this setting in the per-queue HW strip bitmap */
807 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
811 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
813 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
816 PMD_INIT_FUNC_TRACE();
818 ctrl = rd32(hw, TXGBE_RXCFG(queue));
819 ctrl |= TXGBE_RXCFG_VLAN;
820 wr32(hw, TXGBE_RXCFG(queue), ctrl);
822 /* record this setting in the per-queue HW strip bitmap */
823 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
827 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
829 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
832 PMD_INIT_FUNC_TRACE();
834 ctrl = rd32(hw, TXGBE_PORTCTL);
835 ctrl &= ~TXGBE_PORTCTL_VLANEXT;
836 ctrl &= ~TXGBE_PORTCTL_QINQ;
837 wr32(hw, TXGBE_PORTCTL, ctrl);
841 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
843 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
844 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
845 struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
848 PMD_INIT_FUNC_TRACE();
850 ctrl = rd32(hw, TXGBE_PORTCTL);
851 ctrl |= TXGBE_PORTCTL_VLANEXT;
852 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
853 txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
854 ctrl |= TXGBE_PORTCTL_QINQ;
855 wr32(hw, TXGBE_PORTCTL, ctrl);
859 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
861 struct txgbe_rx_queue *rxq;
864 PMD_INIT_FUNC_TRACE();
866 for (i = 0; i < dev->data->nb_rx_queues; i++) {
867 rxq = dev->data->rx_queues[i];
869 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
870 txgbe_vlan_strip_queue_set(dev, i, 1);
872 txgbe_vlan_strip_queue_set(dev, i, 0);
877 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
880 struct rte_eth_rxmode *rxmode;
881 struct txgbe_rx_queue *rxq;
883 if (mask & ETH_VLAN_STRIP_MASK) {
884 rxmode = &dev->data->dev_conf.rxmode;
885 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
886 for (i = 0; i < dev->data->nb_rx_queues; i++) {
887 rxq = dev->data->rx_queues[i];
888 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
891 for (i = 0; i < dev->data->nb_rx_queues; i++) {
892 rxq = dev->data->rx_queues[i];
893 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
899 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
901 struct rte_eth_rxmode *rxmode;
902 rxmode = &dev->data->dev_conf.rxmode;
904 if (mask & ETH_VLAN_STRIP_MASK)
905 txgbe_vlan_hw_strip_config(dev);
907 if (mask & ETH_VLAN_FILTER_MASK) {
908 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
909 txgbe_vlan_hw_filter_enable(dev);
911 txgbe_vlan_hw_filter_disable(dev);
914 if (mask & ETH_VLAN_EXTEND_MASK) {
915 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
916 txgbe_vlan_hw_extend_enable(dev);
918 txgbe_vlan_hw_extend_disable(dev);
925 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
927 txgbe_config_vlan_strip_on_all_queues(dev, mask);
929 txgbe_vlan_offload_config(dev, mask);
935 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
937 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
942 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
945 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
951 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
952 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
953 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
954 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
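/*
 * Worked example (illustrative, assuming TXGBE_MAX_RX_QUEUE_NUM is
 * 128): with ETH_32_POOLS active, nb_q_per_pool = 128 / 32 = 4, and
 * with 8 VFs the PF's default pool starts at queue 8 * 4 = 32.
 */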
959 txgbe_check_mq_mode(struct rte_eth_dev *dev)
961 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
962 uint16_t nb_rx_q = dev->data->nb_rx_queues;
963 uint16_t nb_tx_q = dev->data->nb_tx_queues;
965 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
966 /* check multi-queue mode */
967 switch (dev_conf->rxmode.mq_mode) {
968 case ETH_MQ_RX_VMDQ_DCB:
969 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
971 case ETH_MQ_RX_VMDQ_DCB_RSS:
972 /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
973 PMD_INIT_LOG(ERR, "SRIOV active,"
974 " unsupported mq_mode rx %d.",
975 dev_conf->rxmode.mq_mode);
978 case ETH_MQ_RX_VMDQ_RSS:
979 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
980 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
981 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
982 PMD_INIT_LOG(ERR, "SRIOV is active,"
983 " invalid queue number"
984 " for VMDQ RSS, allowed"
985 " value are 1, 2 or 4.");
989 case ETH_MQ_RX_VMDQ_ONLY:
991 /* if no mq mode is configured, use the default scheme */
992 dev->data->dev_conf.rxmode.mq_mode =
995 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
996 /* SRIOV only works in VMDq enable mode */
997 PMD_INIT_LOG(ERR, "SRIOV is active,"
998 " wrong mq_mode rx %d.",
999 dev_conf->rxmode.mq_mode);
1003 switch (dev_conf->txmode.mq_mode) {
1004 case ETH_MQ_TX_VMDQ_DCB:
1005 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1006 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1008 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1009 dev->data->dev_conf.txmode.mq_mode =
1010 ETH_MQ_TX_VMDQ_ONLY;
1014 /* check valid queue number */
1015 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1016 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1017 PMD_INIT_LOG(ERR, "SRIOV is active,"
1018 " nb_rx_q=%d nb_tx_q=%d queue number"
1019 " must be less than or equal to %d.",
1021 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1025 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1026 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1030 /* check configuration for vmdq+dcb mode */
1031 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1032 const struct rte_eth_vmdq_dcb_conf *conf;
1034 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1035 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1036 TXGBE_VMDQ_DCB_NB_QUEUES);
1039 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1040 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1041 conf->nb_queue_pools == ETH_32_POOLS)) {
1042 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1043 " nb_queue_pools must be %d or %d.",
1044 ETH_16_POOLS, ETH_32_POOLS);
1048 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1049 const struct rte_eth_vmdq_dcb_tx_conf *conf;
1051 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1052 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1053 TXGBE_VMDQ_DCB_NB_QUEUES);
1056 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1057 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1058 conf->nb_queue_pools == ETH_32_POOLS)) {
1059 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1060 " nb_queue_pools != %d and"
1061 " nb_queue_pools != %d.",
1062 ETH_16_POOLS, ETH_32_POOLS);
1067 /* For DCB mode check our configuration before we go further */
1068 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1069 const struct rte_eth_dcb_rx_conf *conf;
1071 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1072 if (!(conf->nb_tcs == ETH_4_TCS ||
1073 conf->nb_tcs == ETH_8_TCS)) {
1074 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1075 " and nb_tcs != %d.",
1076 ETH_4_TCS, ETH_8_TCS);
1081 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1082 const struct rte_eth_dcb_tx_conf *conf;
1084 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1085 if (!(conf->nb_tcs == ETH_4_TCS ||
1086 conf->nb_tcs == ETH_8_TCS)) {
1087 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1088 " and nb_tcs != %d.",
1089 ETH_4_TCS, ETH_8_TCS);
1098 txgbe_dev_configure(struct rte_eth_dev *dev)
1100 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1101 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1104 PMD_INIT_FUNC_TRACE();
1106 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1107 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1109 /* multiple queue mode checking */
1110 ret = txgbe_check_mq_mode(dev);
1112 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1117 /* set flag to update link status after init */
1118 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1121 * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
1122 * allocation preconditions, it will be reset.
1124 adapter->rx_bulk_alloc_allowed = true;
1130 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1132 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1133 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1136 gpie = rd32(hw, TXGBE_GPIOINTEN);
1137 gpie |= TXGBE_GPIOBIT_6;
1138 wr32(hw, TXGBE_GPIOINTEN, gpie);
1139 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1143 * Configure device link speed and setup link.
1144 * It returns 0 on success.
1147 txgbe_dev_start(struct rte_eth_dev *dev)
1149 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1150 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1151 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1152 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1153 uint32_t intr_vector = 0;
1155 bool link_up = false, negotiate = 0;
1157 uint32_t allowed_speeds = 0;
1160 uint32_t *link_speeds;
1162 PMD_INIT_FUNC_TRACE();
1164 /* TXGBE devices don't support:
1165 * - half duplex (checked afterwards for valid speeds)
1166 * - fixed speed: TODO implement
1168 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1170 "Invalid link_speeds for port %u, fix speed not supported",
1171 dev->data->port_id);
1175 /* Stop the link setup handler before resetting the HW. */
1176 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1178 /* disable uio/vfio intr/eventfd mapping */
1179 rte_intr_disable(intr_handle);
1182 hw->adapter_stopped = 0;
1185 /* reinitialize adapter
1186 * this calls reset and start
1188 hw->nb_rx_queues = dev->data->nb_rx_queues;
1189 hw->nb_tx_queues = dev->data->nb_tx_queues;
1190 status = txgbe_pf_reset_hw(hw);
1193 hw->mac.start_hw(hw);
1194 hw->mac.get_link_status = true;
1196 txgbe_dev_phy_intr_setup(dev);
1198 /* check and configure queue intr-vector mapping */
1199 if ((rte_intr_cap_multiple(intr_handle) ||
1200 !RTE_ETH_DEV_SRIOV(dev).active) &&
1201 dev->data->dev_conf.intr_conf.rxq != 0) {
1202 intr_vector = dev->data->nb_rx_queues;
1203 if (rte_intr_efd_enable(intr_handle, intr_vector))
1207 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1208 intr_handle->intr_vec =
1209 rte_zmalloc("intr_vec",
1210 dev->data->nb_rx_queues * sizeof(int), 0);
1211 if (intr_handle->intr_vec == NULL) {
1212 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1213 " intr_vec", dev->data->nb_rx_queues);
1218 /* configure MSI-X for sleep until Rx interrupt */
1219 txgbe_configure_msix(dev);
1221 /* initialize transmission unit */
1222 txgbe_dev_tx_init(dev);
1224 /* This can fail when allocating mbufs for descriptor rings */
1225 err = txgbe_dev_rx_init(dev);
1227 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1231 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1232 ETH_VLAN_EXTEND_MASK;
1233 err = txgbe_vlan_offload_config(dev, mask);
1235 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1239 err = txgbe_dev_rxtx_start(dev);
1241 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1245 /* Skip link setup if loopback mode is enabled. */
1246 if (hw->mac.type == txgbe_mac_raptor &&
1247 dev->data->dev_conf.lpbk_mode)
1248 goto skip_link_setup;
1250 if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1251 err = hw->mac.setup_sfp(hw);
1256 if (hw->phy.media_type == txgbe_media_type_copper) {
1257 /* Turn on the copper */
1258 hw->phy.set_phy_power(hw, true);
1260 /* Turn on the laser */
1261 hw->mac.enable_tx_laser(hw);
1264 err = hw->mac.check_link(hw, &speed, &link_up, 0);
1267 dev->data->dev_link.link_status = link_up;
1269 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1273 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1276 link_speeds = &dev->data->dev_conf.link_speeds;
1277 if (*link_speeds & ~allowed_speeds) {
1278 PMD_INIT_LOG(ERR, "Invalid link setting");
1283 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1284 speed = (TXGBE_LINK_SPEED_100M_FULL |
1285 TXGBE_LINK_SPEED_1GB_FULL |
1286 TXGBE_LINK_SPEED_10GB_FULL);
1288 if (*link_speeds & ETH_LINK_SPEED_10G)
1289 speed |= TXGBE_LINK_SPEED_10GB_FULL;
1290 if (*link_speeds & ETH_LINK_SPEED_5G)
1291 speed |= TXGBE_LINK_SPEED_5GB_FULL;
1292 if (*link_speeds & ETH_LINK_SPEED_2_5G)
1293 speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1294 if (*link_speeds & ETH_LINK_SPEED_1G)
1295 speed |= TXGBE_LINK_SPEED_1GB_FULL;
1296 if (*link_speeds & ETH_LINK_SPEED_100M)
1297 speed |= TXGBE_LINK_SPEED_100M_FULL;
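/*
 * Example (illustrative): link_speeds = ETH_LINK_SPEED_10G |
 * ETH_LINK_SPEED_1G selects TXGBE_LINK_SPEED_10GB_FULL |
 * TXGBE_LINK_SPEED_1GB_FULL, while ETH_LINK_SPEED_AUTONEG (0)
 * advertises the full 100M/1G/10G set handled above.
 */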
1300 err = hw->mac.setup_link(hw, speed, link_up);
1306 if (rte_intr_allow_others(intr_handle)) {
1307 /* check if lsc interrupt is enabled */
1308 if (dev->data->dev_conf.intr_conf.lsc != 0)
1309 txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1311 txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1312 txgbe_dev_macsec_interrupt_setup(dev);
1313 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1315 rte_intr_callback_unregister(intr_handle,
1316 txgbe_dev_interrupt_handler, dev);
1317 if (dev->data->dev_conf.intr_conf.lsc != 0)
1318 PMD_INIT_LOG(INFO, "lsc won't enable because of"
1319 " no intr multiplex");
1322 /* check if rxq interrupt is enabled */
1323 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1324 rte_intr_dp_is_en(intr_handle))
1325 txgbe_dev_rxq_interrupt_setup(dev);
1327 /* enable uio/vfio intr/eventfd mapping */
1328 rte_intr_enable(intr_handle);
1330 /* re-enable interrupts since the hw reset cleared them */
1331 txgbe_enable_intr(dev);
1334 * Update link status right before return, because it may
1335 * start link configuration process in a separate thread.
1337 txgbe_dev_link_update(dev, 0);
1339 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1341 txgbe_read_stats_registers(hw, hw_stats);
1342 hw->offset_loaded = 1;
1347 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1348 txgbe_dev_clear_queues(dev);
1353 * Stop device: disable rx and tx functions to allow for reconfiguring.
1356 txgbe_dev_stop(struct rte_eth_dev *dev)
1358 struct rte_eth_link link;
1359 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1360 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1361 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1363 if (hw->adapter_stopped)
1366 PMD_INIT_FUNC_TRACE();
1368 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1370 /* disable interrupts */
1371 txgbe_disable_intr(hw);
1374 txgbe_pf_reset_hw(hw);
1375 hw->adapter_stopped = 0;
1380 if (hw->phy.media_type == txgbe_media_type_copper) {
1381 /* Turn off the copper */
1382 hw->phy.set_phy_power(hw, false);
1384 /* Turn off the laser */
1385 hw->mac.disable_tx_laser(hw);
1388 txgbe_dev_clear_queues(dev);
1390 /* Clear stored conf */
1391 dev->data->scattered_rx = 0;
1394 /* Clear recorded link status */
1395 memset(&link, 0, sizeof(link));
1396 rte_eth_linkstatus_set(dev, &link);
1398 if (!rte_intr_allow_others(intr_handle))
1399 /* fall back to the default handler */
1400 rte_intr_callback_register(intr_handle,
1401 txgbe_dev_interrupt_handler,
1404 /* Clean datapath event and queue/vec mapping */
1405 rte_intr_efd_disable(intr_handle);
1406 if (intr_handle->intr_vec != NULL) {
1407 rte_free(intr_handle->intr_vec);
1408 intr_handle->intr_vec = NULL;
1411 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1413 hw->adapter_stopped = true;
1414 dev->data->dev_started = 0;
1420 * Set device link up: enable tx.
1423 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1425 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1427 if (hw->phy.media_type == txgbe_media_type_copper) {
1428 /* Turn on the copper */
1429 hw->phy.set_phy_power(hw, true);
1431 /* Turn on the laser */
1432 hw->mac.enable_tx_laser(hw);
1433 txgbe_dev_link_update(dev, 0);
1440 * Set device link down: disable tx.
1443 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1445 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1447 if (hw->phy.media_type == txgbe_media_type_copper) {
1448 /* Turn off the copper */
1449 hw->phy.set_phy_power(hw, false);
1451 /* Turn off the laser */
1452 hw->mac.disable_tx_laser(hw);
1453 txgbe_dev_link_update(dev, 0);
1460 * Reset and stop device.
1463 txgbe_dev_close(struct rte_eth_dev *dev)
1465 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1466 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1467 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1471 PMD_INIT_FUNC_TRACE();
1473 txgbe_pf_reset_hw(hw);
1475 ret = txgbe_dev_stop(dev);
1477 txgbe_dev_free_queues(dev);
1479 /* reprogram the RAR[0] in case user changed it. */
1480 txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1482 /* Unlock any pending hardware semaphore */
1483 txgbe_swfw_lock_reset(hw);
1485 /* disable uio intr before callback unregister */
1486 rte_intr_disable(intr_handle);
1489 ret = rte_intr_callback_unregister(intr_handle,
1490 txgbe_dev_interrupt_handler, dev);
1491 if (ret >= 0 || ret == -ENOENT) {
1493 } else if (ret != -EAGAIN) {
1495 "intr callback unregister failed: %d",
1499 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1501 /* cancel the delayed handler before removing the device */
1502 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1504 rte_free(dev->data->mac_addrs);
1505 dev->data->mac_addrs = NULL;
1507 rte_free(dev->data->hash_mac_addrs);
1508 dev->data->hash_mac_addrs = NULL;
1517 txgbe_dev_reset(struct rte_eth_dev *dev)
1521 /* When a DPDK PMD PF begins to reset the PF port, it should notify all
1522 * its VFs to make them align with it. The detailed notification
1523 * mechanism is PMD specific. As to txgbe PF, it is rather complex.
1524 * To avoid unexpected behavior in VF, currently reset of PF with
1525 * SR-IOV activation is not supported. It might be supported later.
1527 if (dev->data->sriov.active)
1530 ret = eth_txgbe_dev_uninit(dev);
1534 ret = eth_txgbe_dev_init(dev, NULL);
1539 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
1541 uint32_t current_counter = rd32(hw, reg); \
1542 if (current_counter < last_counter) \
1543 current_counter += 0x100000000LL; \
1544 if (!hw->offset_loaded) \
1545 last_counter = current_counter; \
1546 counter = current_counter - last_counter; \
1547 counter &= 0xFFFFFFFFLL; \
1550 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1552 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
1553 uint64_t current_counter_msb = rd32(hw, reg_msb); \
1554 uint64_t current_counter = (current_counter_msb << 32) | \
1555 current_counter_lsb; \
1556 if (current_counter < last_counter) \
1557 current_counter += 0x1000000000LL; \
1558 if (!hw->offset_loaded) \
1559 last_counter = current_counter; \
1560 counter = current_counter - last_counter; \
1561 counter &= 0xFFFFFFFFFLL; \
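/*
 * Wrap-around example (illustrative): the 36-bit byte counters roll
 * over at 2^36. If last_counter = 0xFFFFFFF00 and the next read
 * returns 0x100, current < last, so 0x1000000000 is added before
 * subtracting, yielding the 0x200 bytes actually transferred.
 */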
1565 txgbe_read_stats_registers(struct txgbe_hw *hw,
1566 struct txgbe_hw_stats *hw_stats)
1571 for (i = 0; i < hw->nb_rx_queues; i++) {
1572 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
1573 hw->qp_last[i].rx_qp_packets,
1574 hw_stats->qp[i].rx_qp_packets);
1575 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
1576 hw->qp_last[i].rx_qp_bytes,
1577 hw_stats->qp[i].rx_qp_bytes);
1578 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
1579 hw->qp_last[i].rx_qp_mc_packets,
1580 hw_stats->qp[i].rx_qp_mc_packets);
1583 for (i = 0; i < hw->nb_tx_queues; i++) {
1584 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
1585 hw->qp_last[i].tx_qp_packets,
1586 hw_stats->qp[i].tx_qp_packets);
1587 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
1588 hw->qp_last[i].tx_qp_bytes,
1589 hw_stats->qp[i].tx_qp_bytes);
1592 for (i = 0; i < TXGBE_MAX_UP; i++) {
1593 hw_stats->up[i].rx_up_xon_packets +=
1594 rd32(hw, TXGBE_PBRXUPXON(i));
1595 hw_stats->up[i].rx_up_xoff_packets +=
1596 rd32(hw, TXGBE_PBRXUPXOFF(i));
1597 hw_stats->up[i].tx_up_xon_packets +=
1598 rd32(hw, TXGBE_PBTXUPXON(i));
1599 hw_stats->up[i].tx_up_xoff_packets +=
1600 rd32(hw, TXGBE_PBTXUPXOFF(i));
1601 hw_stats->up[i].tx_up_xon2off_packets +=
1602 rd32(hw, TXGBE_PBTXUPOFF(i));
1603 hw_stats->up[i].rx_up_dropped +=
1604 rd32(hw, TXGBE_PBRXMISS(i));
1606 hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
1607 hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
1608 hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
1609 hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
1612 hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
1613 hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
1615 hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
1616 hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
1617 hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
1620 hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
1621 hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
1622 hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
1624 hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
1625 hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
1626 hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
1628 hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
1629 hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
1631 hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
1632 hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
1633 hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
1634 hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
1635 hw_stats->rx_size_512_to_1023_packets +=
1636 rd64(hw, TXGBE_MACRX512TO1023L);
1637 hw_stats->rx_size_1024_to_max_packets +=
1638 rd64(hw, TXGBE_MACRX1024TOMAXL);
1639 hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
1640 hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
1641 hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
1642 hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
1643 hw_stats->tx_size_512_to_1023_packets +=
1644 rd64(hw, TXGBE_MACTX512TO1023L);
1645 hw_stats->tx_size_1024_to_max_packets +=
1646 rd64(hw, TXGBE_MACTX1024TOMAXL);
1648 hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
1649 hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
1650 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
1653 hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
1654 hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
1655 hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
1656 hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
1659 hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
1660 hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
1661 hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
1662 hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
1663 hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
1664 hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
1665 hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
1667 /* Flow Director Stats */
1668 hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
1669 hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
1670 hw_stats->flow_director_added_filters +=
1671 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
1672 hw_stats->flow_director_removed_filters +=
1673 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
1674 hw_stats->flow_director_filter_add_errors +=
1675 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
1676 hw_stats->flow_director_filter_remove_errors +=
1677 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
1680 hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
1681 hw_stats->tx_macsec_pkts_encrypted +=
1682 rd32(hw, TXGBE_LSECTX_ENCPKT);
1683 hw_stats->tx_macsec_pkts_protected +=
1684 rd32(hw, TXGBE_LSECTX_PROTPKT);
1685 hw_stats->tx_macsec_octets_encrypted +=
1686 rd32(hw, TXGBE_LSECTX_ENCOCT);
1687 hw_stats->tx_macsec_octets_protected +=
1688 rd32(hw, TXGBE_LSECTX_PROTOCT);
1689 hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
1690 hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
1691 hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
1692 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
1693 hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
1694 hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
1695 hw_stats->rx_macsec_sc_pkts_unchecked +=
1696 rd32(hw, TXGBE_LSECRX_UNCHKPKT);
1697 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
1698 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
1699 for (i = 0; i < 2; i++) {
1700 hw_stats->rx_macsec_sa_pkts_ok +=
1701 rd32(hw, TXGBE_LSECRX_OKPKT(i));
1702 hw_stats->rx_macsec_sa_pkts_invalid +=
1703 rd32(hw, TXGBE_LSECRX_INVPKT(i));
1704 hw_stats->rx_macsec_sa_pkts_notvalid +=
1705 rd32(hw, TXGBE_LSECRX_BADPKT(i));
1707 hw_stats->rx_macsec_sa_pkts_unusedsa +=
1708 rd32(hw, TXGBE_LSECRX_INVSAPKT);
1709 hw_stats->rx_macsec_sa_pkts_notusingsa +=
1710 rd32(hw, TXGBE_LSECRX_BADSAPKT);
1712 hw_stats->rx_total_missed_packets = 0;
1713 for (i = 0; i < TXGBE_MAX_UP; i++) {
1714 hw_stats->rx_total_missed_packets +=
1715 hw_stats->up[i].rx_up_dropped;
1720 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1722 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1723 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1724 struct txgbe_stat_mappings *stat_mappings =
1725 TXGBE_DEV_STAT_MAPPINGS(dev);
1728 txgbe_read_stats_registers(hw, hw_stats);
1733 /* Fill out the rte_eth_stats statistics structure */
1734 stats->ipackets = hw_stats->rx_packets;
1735 stats->ibytes = hw_stats->rx_bytes;
1736 stats->opackets = hw_stats->tx_packets;
1737 stats->obytes = hw_stats->tx_bytes;
1739 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1740 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1741 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1742 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1743 memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1744 for (i = 0; i < TXGBE_MAX_QP; i++) {
1745 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1746 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1749 q_map = (stat_mappings->rqsm[n] >> offset)
1750 & QMAP_FIELD_RESERVED_BITS_MASK;
1751 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1752 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1753 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1754 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1756 q_map = (stat_mappings->tqsm[n] >> offset)
1757 & QMAP_FIELD_RESERVED_BITS_MASK;
1758 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1759 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1760 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1761 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
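/*
 * Example (illustrative): a queue whose QSM field maps it to stat
 * index 9 accumulates into q_ipackets[9]/q_ibytes[9]; indices at or
 * above RTE_ETHDEV_QUEUE_STAT_CNTRS (16 by default) are folded back
 * modulo that limit so the fixed-size per-queue arrays are never
 * overrun.
 */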
1765 stats->imissed = hw_stats->rx_total_missed_packets;
1766 stats->ierrors = hw_stats->rx_crc_errors +
1767 hw_stats->rx_mac_short_packet_dropped +
1768 hw_stats->rx_length_errors +
1769 hw_stats->rx_undersize_errors +
1770 hw_stats->rx_oversize_errors +
1771 hw_stats->rx_drop_packets +
1772 hw_stats->rx_illegal_byte_errors +
1773 hw_stats->rx_error_bytes +
1774 hw_stats->rx_fragment_errors +
1775 hw_stats->rx_fcoe_crc_errors +
1776 hw_stats->rx_fcoe_mbuf_allocation_errors;
1784 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
1786 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1787 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1789 /* HW registers are cleared on read */
1790 hw->offset_loaded = 0;
1791 txgbe_dev_stats_get(dev, NULL);
1792 hw->offset_loaded = 1;
1794 /* Reset software totals */
1795 memset(hw_stats, 0, sizeof(*hw_stats));
1800 /* This function calculates the number of xstats based on the current config */
1802 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
1804 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1805 return TXGBE_NB_HW_STATS +
1806 TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
1807 TXGBE_NB_QP_STATS * nb_queues;
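/*
 * Example (illustrative, assuming TXGBE_MAX_UP is 8): with 4 Rx and
 * 8 Tx queues configured, nb_queues = max(4, 8) = 8, so the exposed
 * count is TXGBE_NB_HW_STATS + 8 * TXGBE_NB_UP_STATS +
 * 8 * TXGBE_NB_QP_STATS.
 */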
1811 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1815 /* Extended stats from txgbe_hw_stats */
1816 if (id < TXGBE_NB_HW_STATS) {
1817 snprintf(name, size, "[hw]%s",
1818 rte_txgbe_stats_strings[id].name);
1821 id -= TXGBE_NB_HW_STATS;
1823 /* Priority Stats */
1824 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1825 nb = id / TXGBE_NB_UP_STATS;
1826 st = id % TXGBE_NB_UP_STATS;
1827 snprintf(name, size, "[p%u]%s", nb,
1828 rte_txgbe_up_strings[st].name);
1831 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1834 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1835 nb = id / TXGBE_NB_QP_STATS;
1836 st = id % TXGBE_NB_QP_STATS;
1837 snprintf(name, size, "[q%u]%s", nb,
1838 rte_txgbe_qp_strings[st].name);
1841 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1843 return -(int)(id + 1);
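/*
 * The flat xstat id space mirrors txgbe_xstats_calc_num(): [hw] stats
 * first, then per-priority [p0..] stats, then per-queue [q0..] stats.
 * For example, id TXGBE_NB_HW_STATS + TXGBE_NB_UP_STATS decodes to
 * nb = 1, st = 0, i.e. "[p1]rx_up_packets"; ids past the end return a
 * negative value encoding the overshoot.
 */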
1847 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1851 /* Extended stats from txgbe_hw_stats */
1852 if (id < TXGBE_NB_HW_STATS) {
1853 *offset = rte_txgbe_stats_strings[id].offset;
1856 id -= TXGBE_NB_HW_STATS;
1858 /* Priority Stats */
1859 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1860 nb = id / TXGBE_NB_UP_STATS;
1861 st = id % TXGBE_NB_UP_STATS;
1862 *offset = rte_txgbe_up_strings[st].offset +
1863 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
1866 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1869 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1870 nb = id / TXGBE_NB_QP_STATS;
1871 st = id % TXGBE_NB_QP_STATS;
1872 *offset = rte_txgbe_qp_strings[st].offset +
1873 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
1876 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1878 return -(int)(id + 1);
1881 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1882 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1884 unsigned int i, count;
1886 count = txgbe_xstats_calc_num(dev);
1887 if (xstats_names == NULL)
1890 /* Note: limit >= cnt_stats checked upstream
1891 * in rte_eth_xstats_names()
1893 limit = min(limit, count);
1895 /* Extended stats from txgbe_hw_stats */
1896 for (i = 0; i < limit; i++) {
1897 if (txgbe_get_name_by_id(i, xstats_names[i].name,
1898 sizeof(xstats_names[i].name))) {
1899 PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1907 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1908 struct rte_eth_xstat_name *xstats_names,
1909 const uint64_t *ids,
1915 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
1917 for (i = 0; i < limit; i++) {
1918 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
1919 sizeof(xstats_names[i].name))) {
1920 PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1929 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1932 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1933 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1934 unsigned int i, count;
1936 txgbe_read_stats_registers(hw, hw_stats);
1938 /* If this is a reset, xstats is NULL and we have cleared the
1939 * registers by reading them.
1941 count = txgbe_xstats_calc_num(dev);
1945 limit = min(limit, txgbe_xstats_calc_num(dev));
1947 /* Extended stats from txgbe_hw_stats */
1948 for (i = 0; i < limit; i++) {
1949 uint32_t offset = 0;
1951 if (txgbe_get_offset_by_id(i, &offset)) {
1952 PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1955 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1963 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1966 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1967 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1968 unsigned int i, count;
1970 txgbe_read_stats_registers(hw, hw_stats);
1972 /* If this is a reset, xstats is NULL and we have cleared the
1973 * registers by reading them.
1975 count = txgbe_xstats_calc_num(dev);
1979 limit = min(limit, txgbe_xstats_calc_num(dev));
1981 /* Extended stats from txgbe_hw_stats */
1982 for (i = 0; i < limit; i++) {
1985 if (txgbe_get_offset_by_id(i, &offset)) {
1986 PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1989 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1996 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1997 uint64_t *values, unsigned int limit)
1999 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2003 return txgbe_dev_xstats_get_(dev, values, limit);
2005 for (i = 0; i < limit; i++) {
2008 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2009 PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
2012 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2019 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2021 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2022 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2024 /* HW registers are cleared on read */
2025 hw->offset_loaded = 0;
2026 txgbe_read_stats_registers(hw, hw_stats);
2027 hw->offset_loaded = 1;
2029 /* Reset software totals */
2030 memset(hw_stats, 0, sizeof(*hw_stats));
2036 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2038 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2039 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2041 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2042 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2043 dev_info->min_rx_bufsize = 1024;
2044 dev_info->max_rx_pktlen = 15872;
2045 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2046 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2047 dev_info->max_vfs = pci_dev->max_vfs;
2048 dev_info->max_vmdq_pools = ETH_64_POOLS;
2049 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2050 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2051 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2052 dev_info->rx_queue_offload_capa);
2053 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2054 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2056 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2058 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2059 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2060 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2062 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2067 dev_info->default_txconf = (struct rte_eth_txconf) {
2069 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2070 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2071 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2073 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2077 dev_info->rx_desc_lim = rx_desc_lim;
2078 dev_info->tx_desc_lim = tx_desc_lim;
2080 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2081 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2082 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2084 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2085 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2087 /* Driver-preferred Rx/Tx parameters */
2088 dev_info->default_rxportconf.burst_size = 32;
2089 dev_info->default_txportconf.burst_size = 32;
2090 dev_info->default_rxportconf.nb_queues = 1;
2091 dev_info->default_txportconf.nb_queues = 1;
2092 dev_info->default_rxportconf.ring_size = 256;
2093 dev_info->default_txportconf.ring_size = 256;
2099 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2101 if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2102 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2103 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2104 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2105 return txgbe_get_supported_ptypes();
2111 txgbe_dev_setup_link_alarm_handler(void *param)
2113 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2114 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2115 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2117 bool autoneg = false;
2119 speed = hw->phy.autoneg_advertised;
2121 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2123 hw->mac.setup_link(hw, speed, true);
2125 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2128 /* returns 0 if the link status changed, -1 if not changed */
2130 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2131 int wait_to_complete)
2133 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2134 struct rte_eth_link link;
2135 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2136 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2141 memset(&link, 0, sizeof(link));
2142 link.link_status = ETH_LINK_DOWN;
2143 link.link_speed = ETH_SPEED_NUM_NONE;
2144 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2145 link.link_autoneg = ETH_LINK_AUTONEG;
2147 hw->mac.get_link_status = true;
2149 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2150 return rte_eth_linkstatus_set(dev, &link);
	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		if (hw->phy.media_type == txgbe_media_type_fiber) {
			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				txgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
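
/*
 * Note: on a fiber port with the link down, the function above defers
 * link (re)configuration to txgbe_dev_setup_link_alarm_handler() via a
 * 10us EAL alarm and reports the link as down right away; while
 * TXGBE_FLAG_NEED_LINK_CONFIG is set, later polls short-circuit to
 * "down" so callers never observe a half-configured link.
 */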
static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbe_dev_link_status_print(dev);
	if (on)
		intr->mask_misc |= TXGBE_ICRMISC_LSC;
	else
		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

	return 0;
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask[0] |= TXGBE_ICR_MASK;
	intr->mask[1] |= TXGBE_ICR_MASK;

	return 0;
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

	return 0;
}
/*
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX)
		intr->flags &= ~TXGBE_FLAG_MAILBOX;

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* likely to go up */
		if (!link.link_status)
			/* handle it 1 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to go down */
		else
			/* handle it 4 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}
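
/*
 * While the delayed handler is pending, LSC is dropped from mask_misc so
 * that re-enabling interrupts above cannot immediately raise another
 * link-change interrupt; txgbe_dev_interrupt_delayed_handler() restores
 * the mask from mask_misc_orig once the link has had time to settle.
 */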
/*
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, to wait for the NIC state to
 * become stable. As the interrupt state of txgbe is not stable right
 * after the link goes down, it needs to wait 4 seconds to get a stable
 * status.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t eicr;

	txgbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		txgbe_dev_link_update(dev, 0);
		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
		txgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & TXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~TXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}
/*
 * Interrupt handler triggered by NIC for handling a specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbe_dev_interrupt_get_status(dev);
	txgbe_dev_interrupt_action(dev, dev->intr_handle);
}
static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t enable_addr = 1;

	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	txgbe_clear_rar(hw, index);
}
static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	txgbe_remove_rar(dev, 0);
	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}
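
/*
 * The default MAC address always occupies RAR[0]. Re-adding it with
 * pool index pci_dev->max_vfs appears to bind it to the first pool past
 * the VF range, i.e. the PF pool (an inference from the call above,
 * mirroring the ixgbe layout).
 */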
static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
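
/*
 * Worked example for mc_filter_type == 0 and MAC 00:11:22:33:44:55:
 * addr_bytes[4] = 0x44 and addr_bytes[5] = 0x55, so
 * vector = (0x44 >> 4) | (0x55 << 4) = 0x004 | 0x550 = 0x554.
 */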
static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	vector = txgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
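
/*
 * In txgbe_uc_hash_table_set() above, the 12-bit vector addresses a
 * 4096-bit hash table spread over 128 32-bit UCADDRTBL registers:
 * bits [11:5] select the register (uta_idx) and bits [4:0] select the
 * bit within it (uta_mask). E.g. vector 0x554 -> register 42, bit 20.
 */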
static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, TXGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (on)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
static int
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= (1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= (1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}
	rte_intr_enable(intr_handle);

	return 0;
}
static int
txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= ~(1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= ~(1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}

	return 0;
}
/**
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}
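
/*
 * IVAR layout example: each TXGBE_IVAR register holds four 8-bit entries
 * covering two queues (one Rx and one Tx entry per queue). For Rx
 * queue 5: direction = 0 and queue & 1 = 1, so idx = 16 and the vector
 * is written to bits [23:16] of TXGBE_IVAR(2).
 */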
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* Won't configure the MSI-X registers if no mapping is done
	 * between intr vector and event fd; but if MSI-X has been
	 * enabled already, need to configure auto clean, auto mask
	 * and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| TXGBE_ITR_WRDSA);
}
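
/*
 * Resulting mapping with N Rx queues when rte_intr_allow_others() is
 * true: queues 0..N-1 get vectors TXGBE_RX_VEC_START, +1, ... (clamped
 * at the last event fd), while the misc cause stays on
 * TXGBE_MISC_VEC_ID, so link/mailbox events never share a vector with
 * Rx traffic.
 */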
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

static int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct txgbe_hw *hw;
	u8 *mc_addr_list;

	hw = TXGBE_DEV_HW(dev);
	mc_addr_list = (u8 *)mc_addr_set;
	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 txgbe_dev_addr_list_itr, TRUE);
}
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure              = txgbe_dev_configure,
	.dev_infos_get              = txgbe_dev_info_get,
	.dev_start                  = txgbe_dev_start,
	.dev_stop                   = txgbe_dev_stop,
	.dev_set_link_up            = txgbe_dev_set_link_up,
	.dev_set_link_down          = txgbe_dev_set_link_down,
	.dev_close                  = txgbe_dev_close,
	.dev_reset                  = txgbe_dev_reset,
	.link_update                = txgbe_dev_link_update,
	.stats_get                  = txgbe_dev_stats_get,
	.xstats_get                 = txgbe_dev_xstats_get,
	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
	.stats_reset                = txgbe_dev_stats_reset,
	.xstats_reset               = txgbe_dev_xstats_reset,
	.xstats_get_names           = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
	.vlan_filter_set            = txgbe_vlan_filter_set,
	.vlan_tpid_set              = txgbe_vlan_tpid_set,
	.vlan_offload_set           = txgbe_vlan_offload_set,
	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
	.rx_queue_start             = txgbe_dev_rx_queue_start,
	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
	.tx_queue_start             = txgbe_dev_tx_queue_start,
	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release           = txgbe_dev_rx_queue_release,
	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
	.tx_queue_release           = txgbe_dev_tx_queue_release,
	.mac_addr_add               = txgbe_add_rar,
	.mac_addr_remove            = txgbe_remove_rar,
	.mac_addr_set               = txgbe_set_default_mac_addr,
	.uc_hash_table_set          = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
	.rxq_info_get               = txgbe_rxq_info_get,
	.txq_info_get               = txgbe_txq_info_get,
};
RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif