1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_interrupts.h>
14 #include <rte_debug.h>
16 #include <rte_memory.h>
18 #include <rte_alarm.h>
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
25 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
26 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
27 static int txgbe_dev_close(struct rte_eth_dev *dev);
28 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
29 int wait_to_complete);
30 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
31 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
32 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
35 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
36 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
37 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
38 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
39 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
40 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
41 struct rte_intr_handle *handle);
42 static void txgbe_dev_interrupt_handler(void *param);
43 static void txgbe_dev_interrupt_delayed_handler(void *param);
44 static void txgbe_configure_msix(struct rte_eth_dev *dev);
46 #define TXGBE_SET_HWSTRIP(h, q) do {\
47 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
48 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
49 (h)->bitmap[idx] |= 1 << bit;\
52 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
53 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
54 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
55 (h)->bitmap[idx] &= ~(1 << bit);\
58 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
59 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
60 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
61 (r) = (h)->bitmap[idx] >> bit & 1;\
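/*
 * Illustrative example (not part of the driver): assuming uint32_t bitmap
 * words (NBBY == 8, so 32 bits per word), queue 37 resolves to
 * idx = 37 / 32 = 1 and bit = 37 % 32 = 5, so TXGBE_SET_HWSTRIP(h, 37)
 * performs h->bitmap[1] |= 1 << 5.
 */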
65 * The set of PCI devices this driver supports
67 static const struct rte_pci_id pci_id_txgbe_map[] = {
68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
70 { .vendor_id = 0, /* sentinel */ },
73 static const struct rte_eth_desc_lim rx_desc_lim = {
74 .nb_max = TXGBE_RING_DESC_MAX,
75 .nb_min = TXGBE_RING_DESC_MIN,
76 .nb_align = TXGBE_RXD_ALIGN,
79 static const struct rte_eth_desc_lim tx_desc_lim = {
80 .nb_max = TXGBE_RING_DESC_MAX,
81 .nb_min = TXGBE_RING_DESC_MIN,
82 .nb_align = TXGBE_TXD_ALIGN,
83 .nb_seg_max = TXGBE_TX_MAX_SEG,
84 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
87 static const struct eth_dev_ops txgbe_eth_dev_ops;
89 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
90 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
91 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
93 HW_XSTAT(mng_bmc2host_packets),
94 HW_XSTAT(mng_host2bmc_packets),
100 HW_XSTAT(rx_total_bytes),
101 HW_XSTAT(rx_total_packets),
102 HW_XSTAT(tx_total_packets),
103 HW_XSTAT(rx_total_missed_packets),
104 HW_XSTAT(rx_broadcast_packets),
105 HW_XSTAT(rx_multicast_packets),
106 HW_XSTAT(rx_management_packets),
107 HW_XSTAT(tx_management_packets),
108 HW_XSTAT(rx_management_dropped),
111 HW_XSTAT(rx_crc_errors),
112 HW_XSTAT(rx_illegal_byte_errors),
113 HW_XSTAT(rx_error_bytes),
114 HW_XSTAT(rx_mac_short_packet_dropped),
115 HW_XSTAT(rx_length_errors),
116 HW_XSTAT(rx_undersize_errors),
117 HW_XSTAT(rx_fragment_errors),
118 HW_XSTAT(rx_oversize_errors),
119 HW_XSTAT(rx_jabber_errors),
120 HW_XSTAT(rx_l3_l4_xsum_error),
121 HW_XSTAT(mac_local_errors),
122 HW_XSTAT(mac_remote_errors),
125 HW_XSTAT(flow_director_added_filters),
126 HW_XSTAT(flow_director_removed_filters),
127 HW_XSTAT(flow_director_filter_add_errors),
128 HW_XSTAT(flow_director_filter_remove_errors),
129 HW_XSTAT(flow_director_matched_filters),
130 HW_XSTAT(flow_director_missed_filters),
133 HW_XSTAT(rx_fcoe_crc_errors),
134 HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
135 HW_XSTAT(rx_fcoe_dropped),
136 HW_XSTAT(rx_fcoe_packets),
137 HW_XSTAT(tx_fcoe_packets),
138 HW_XSTAT(rx_fcoe_bytes),
139 HW_XSTAT(tx_fcoe_bytes),
140 HW_XSTAT(rx_fcoe_no_ddp),
141 HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
144 HW_XSTAT(tx_macsec_pkts_untagged),
145 HW_XSTAT(tx_macsec_pkts_encrypted),
146 HW_XSTAT(tx_macsec_pkts_protected),
147 HW_XSTAT(tx_macsec_octets_encrypted),
148 HW_XSTAT(tx_macsec_octets_protected),
149 HW_XSTAT(rx_macsec_pkts_untagged),
150 HW_XSTAT(rx_macsec_pkts_badtag),
151 HW_XSTAT(rx_macsec_pkts_nosci),
152 HW_XSTAT(rx_macsec_pkts_unknownsci),
153 HW_XSTAT(rx_macsec_octets_decrypted),
154 HW_XSTAT(rx_macsec_octets_validated),
155 HW_XSTAT(rx_macsec_sc_pkts_unchecked),
156 HW_XSTAT(rx_macsec_sc_pkts_delayed),
157 HW_XSTAT(rx_macsec_sc_pkts_late),
158 HW_XSTAT(rx_macsec_sa_pkts_ok),
159 HW_XSTAT(rx_macsec_sa_pkts_invalid),
160 HW_XSTAT(rx_macsec_sa_pkts_notvalid),
161 HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
162 HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
165 HW_XSTAT(rx_size_64_packets),
166 HW_XSTAT(rx_size_65_to_127_packets),
167 HW_XSTAT(rx_size_128_to_255_packets),
168 HW_XSTAT(rx_size_256_to_511_packets),
169 HW_XSTAT(rx_size_512_to_1023_packets),
170 HW_XSTAT(rx_size_1024_to_max_packets),
171 HW_XSTAT(tx_size_64_packets),
172 HW_XSTAT(tx_size_65_to_127_packets),
173 HW_XSTAT(tx_size_128_to_255_packets),
174 HW_XSTAT(tx_size_256_to_511_packets),
175 HW_XSTAT(tx_size_512_to_1023_packets),
176 HW_XSTAT(tx_size_1024_to_max_packets),
179 HW_XSTAT(tx_xon_packets),
180 HW_XSTAT(rx_xon_packets),
181 HW_XSTAT(tx_xoff_packets),
182 HW_XSTAT(rx_xoff_packets),
184 HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
185 HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
186 HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
187 HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
190 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
191 sizeof(rte_txgbe_stats_strings[0]))
193 /* Per-priority statistics */
194 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
195 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
196 UP_XSTAT(rx_up_packets),
197 UP_XSTAT(tx_up_packets),
198 UP_XSTAT(rx_up_bytes),
199 UP_XSTAT(tx_up_bytes),
200 UP_XSTAT(rx_up_drop_packets),
202 UP_XSTAT(tx_up_xon_packets),
203 UP_XSTAT(rx_up_xon_packets),
204 UP_XSTAT(tx_up_xoff_packets),
205 UP_XSTAT(rx_up_xoff_packets),
206 UP_XSTAT(rx_up_dropped),
207 UP_XSTAT(rx_up_mbuf_alloc_errors),
208 UP_XSTAT(tx_up_xon2off_packets),
211 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
212 sizeof(rte_txgbe_up_strings[0]))
214 /* Per-queue statistics */
215 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
216 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
217 QP_XSTAT(rx_qp_packets),
218 QP_XSTAT(tx_qp_packets),
219 QP_XSTAT(rx_qp_bytes),
220 QP_XSTAT(tx_qp_bytes),
221 QP_XSTAT(rx_qp_mc_packets),
224 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
225 sizeof(rte_txgbe_qp_strings[0]))
228 txgbe_is_sfp(struct txgbe_hw *hw)
230 switch (hw->phy.type) {
231 case txgbe_phy_sfp_avago:
232 case txgbe_phy_sfp_ftl:
233 case txgbe_phy_sfp_intel:
234 case txgbe_phy_sfp_unknown:
235 case txgbe_phy_sfp_tyco_passive:
236 case txgbe_phy_sfp_unknown_passive:
243 static inline int32_t
244 txgbe_pf_reset_hw(struct txgbe_hw *hw)
249 status = hw->mac.reset_hw(hw);
251 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
252 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
253 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
254 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
257 if (status == TXGBE_ERR_SFP_NOT_PRESENT)
263 txgbe_enable_intr(struct rte_eth_dev *dev)
265 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
266 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
268 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
269 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
270 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
275 txgbe_disable_intr(struct txgbe_hw *hw)
277 PMD_INIT_FUNC_TRACE();
279 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
280 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
281 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
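/*
 * Note (inferred from the enable/disable symmetry of the two functions
 * above): writing TXGBE_IMC clears mask bits and thus unmasks vectors in
 * txgbe_enable_intr(), while writing the same value to TXGBE_IMS sets the
 * mask bits and disables them here.
 */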
286 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
291 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
292 struct txgbe_stat_mappings *stat_mappings =
293 TXGBE_DEV_STAT_MAPPINGS(eth_dev);
294 uint32_t qsmr_mask = 0;
295 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
299 if (hw->mac.type != txgbe_mac_raptor)
302 if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
305 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
306 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
309 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
310 if (n >= TXGBE_NB_STAT_MAPPING) {
311 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
314 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
316 /* Now clear any previous stat_idx set */
317 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
319 stat_mappings->tqsm[n] &= ~clearing_mask;
321 stat_mappings->rqsm[n] &= ~clearing_mask;
323 q_map = (uint32_t)stat_idx;
324 q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
325 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
327 stat_mappings->tqsm[n] |= qsmr_mask;
329 stat_mappings->rqsm[n] |= qsmr_mask;
331 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
332 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
334 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
335 is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
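/*
 * Illustrative usage (not part of the driver): an application maps Rx
 * queue 5 of port 0 to stat counter 3 with
 *
 *     rte_eth_dev_set_rx_queue_stats_mapping(0, 5, 3);
 *
 * which lands in this callback. Assuming NB_QMAP_FIELDS_PER_QSM_REG == 4,
 * queue 5 falls into mapping register n = 5 / 4 = 1, field 5 % 4 = 1.
 */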
340 * Ensure that all locks are released before first NVM or PHY access
343 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
348 * These locks are trickier since they are common to all ports; but the
349 * swfw_sync retries last long enough (1s) to be almost sure that if a
350 * lock cannot be taken it is due to an improper lock of the
353 mask = TXGBE_MNGSEM_SWPHY |
355 TXGBE_MNGSEM_SWFLASH;
356 if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
357 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
359 hw->mac.release_swfw_sync(hw, mask);
363 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
365 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
366 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
367 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
368 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
369 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
370 const struct rte_memzone *mz;
375 PMD_INIT_FUNC_TRACE();
377 eth_dev->dev_ops = &txgbe_eth_dev_ops;
378 eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
379 eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
380 eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
383 * For secondary processes, we don't initialise any further as primary
384 * has already done this work. Only check we don't need a different
385 * RX and TX function.
387 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
388 struct txgbe_tx_queue *txq;
389 /* TX queue function in primary, set by last queue initialized;
390 * a Tx queue may not be initialized by the primary process
392 if (eth_dev->data->tx_queues) {
393 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
394 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
395 txgbe_set_tx_function(eth_dev, txq);
397 /* Use default TX function if we get here */
398 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
399 "Using default TX function.");
402 txgbe_set_rx_function(eth_dev);
407 rte_eth_copy_pci_info(eth_dev, pci_dev);
409 /* Vendor and Device ID need to be set before init of shared code */
410 hw->device_id = pci_dev->id.device_id;
411 hw->vendor_id = pci_dev->id.vendor_id;
412 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
413 hw->allow_unsupported_sfp = 1;
415 /* Reserve memory for interrupt status block */
416 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
417 16, TXGBE_ALIGN, SOCKET_ID_ANY);
421 hw->isb_dma = TMZ_PADDR(mz);
422 hw->isb_mem = TMZ_VADDR(mz);
424 /* Initialize the shared code (base driver) */
425 err = txgbe_init_shared_code(hw);
427 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
431 /* Unlock any pending hardware semaphore */
432 txgbe_swfw_lock_reset(hw);
434 err = hw->rom.init_params(hw);
436 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
440 /* Make sure we have a good EEPROM before we read from it */
441 err = hw->rom.validate_checksum(hw, &csum);
443 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
447 err = hw->mac.init_hw(hw);
450 * Devices with copper phys will fail to initialise if txgbe_init_hw()
451 * is called too soon after the kernel driver unbinding/binding occurs.
452 * The failure occurs in txgbe_identify_phy() for all devices,
454 * but for non-copper devices, txgbe_identify_sfp_module() is
455 * also called. See txgbe_identify_phy(). The reason for the
456 * failure is not known, and only occurs when virtualisation features
457 * are disabled in the BIOS. A delay of 200ms was found to be enough by
457 * trial-and-error, and is doubled to be safe.
459 if (err && hw->phy.media_type == txgbe_media_type_copper) {
461 err = hw->mac.init_hw(hw);
464 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
467 if (err == TXGBE_ERR_EEPROM_VERSION) {
468 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
469 "LOM. Please be aware there may be issues associated "
470 "with your hardware.");
471 PMD_INIT_LOG(ERR, "If you are experiencing problems "
472 "please contact your hardware representative "
473 "who provided you with this hardware.");
474 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
475 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
478 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
482 /* Reset the hw statistics */
483 txgbe_dev_stats_reset(eth_dev);
485 /* disable interrupt */
486 txgbe_disable_intr(hw);
488 /* Allocate memory for storing MAC addresses */
489 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
490 hw->mac.num_rar_entries, 0);
491 if (eth_dev->data->mac_addrs == NULL) {
493 "Failed to allocate %u bytes needed to store "
495 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
499 /* Copy the permanent MAC address */
500 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
501 &eth_dev->data->mac_addrs[0]);
503 /* Allocate memory for storing hash filter MAC addresses */
504 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
505 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
506 if (eth_dev->data->hash_mac_addrs == NULL) {
508 "Failed to allocate %d bytes needed to store MAC addresses",
509 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
513 /* initialize the vfta */
514 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
516 /* initialize the hw strip bitmap */
517 memset(hwstrip, 0, sizeof(*hwstrip));
519 /* initialize PF if max_vfs not zero */
520 txgbe_pf_host_init(eth_dev);
522 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
523 /* let hardware know driver is loaded */
524 ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
525 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
526 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
527 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
530 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
531 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
532 (int)hw->mac.type, (int)hw->phy.type,
533 (int)hw->phy.sfp_type);
535 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
536 (int)hw->mac.type, (int)hw->phy.type);
538 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
539 eth_dev->data->port_id, pci_dev->id.vendor_id,
540 pci_dev->id.device_id);
542 rte_intr_callback_register(intr_handle,
543 txgbe_dev_interrupt_handler, eth_dev);
545 /* enable uio/vfio intr/eventfd mapping */
546 rte_intr_enable(intr_handle);
548 /* enable supported interrupts */
549 txgbe_enable_intr(eth_dev);
555 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
557 PMD_INIT_FUNC_TRACE();
559 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
562 txgbe_dev_close(eth_dev);
568 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
569 struct rte_pci_device *pci_dev)
571 struct rte_eth_dev *pf_ethdev;
572 struct rte_eth_devargs eth_da;
575 if (pci_dev->device.devargs) {
576 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
581 memset(&eth_da, 0, sizeof(eth_da));
584 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
585 sizeof(struct txgbe_adapter),
586 eth_dev_pci_specific_init, pci_dev,
587 eth_txgbe_dev_init, NULL);
589 if (retval || eth_da.nb_representor_ports < 1)
592 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
593 if (pf_ethdev == NULL)
599 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
601 struct rte_eth_dev *ethdev;
603 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
607 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
610 static struct rte_pci_driver rte_txgbe_pmd = {
611 .id_table = pci_id_txgbe_map,
612 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
613 RTE_PCI_DRV_INTR_LSC,
614 .probe = eth_txgbe_pci_probe,
615 .remove = eth_txgbe_pci_remove,
619 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
621 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
622 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
627 vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
628 vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
629 vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
634 wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
636 /* update local VFTA copy */
637 shadow_vfta->vfta[vid_idx] = vfta;
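/*
 * Illustrative example (not part of the driver): vlan_id 1234 resolves to
 * vid_idx = (1234 >> 5) & 0x7F = 38 and vid_bit = 1 << (1234 & 0x1F)
 * = 1 << 18, i.e. bit 18 of VLANTBL[38]. Applications reach this path via
 *
 *     rte_eth_dev_vlan_filter(port_id, 1234, 1);
 */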
643 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
645 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
646 struct txgbe_rx_queue *rxq;
648 uint32_t rxcfg, rxbal, rxbah;
651 txgbe_vlan_hw_strip_enable(dev, queue);
653 txgbe_vlan_hw_strip_disable(dev, queue);
655 rxq = dev->data->rx_queues[queue];
656 rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
657 rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
658 rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
659 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
660 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
661 !(rxcfg & TXGBE_RXCFG_VLAN);
662 rxcfg |= TXGBE_RXCFG_VLAN;
664 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
665 (rxcfg & TXGBE_RXCFG_VLAN);
666 rxcfg &= ~TXGBE_RXCFG_VLAN;
668 rxcfg &= ~TXGBE_RXCFG_ENA;
671 /* set vlan strip for ring */
672 txgbe_dev_rx_queue_stop(dev, queue);
673 wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
674 wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
675 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
676 txgbe_dev_rx_queue_start(dev, queue);
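/*
 * Illustrative usage (not part of the driver): per-queue VLAN stripping
 * is toggled from an application with
 *
 *     rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, 1);
 *
 * The ring is stopped and restarted above so that RXCFG is rewritten
 * while the queue is disabled.
 */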
681 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
682 enum rte_vlan_type vlan_type,
685 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
687 uint32_t portctrl, vlan_ext, qinq;
689 portctrl = rd32(hw, TXGBE_PORTCTL);
691 vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
692 qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
694 case ETH_VLAN_TYPE_INNER:
696 wr32m(hw, TXGBE_VLANCTL,
697 TXGBE_VLANCTL_TPID_MASK,
698 TXGBE_VLANCTL_TPID(tpid));
699 wr32m(hw, TXGBE_DMATXCTRL,
700 TXGBE_DMATXCTRL_TPID_MASK,
701 TXGBE_DMATXCTRL_TPID(tpid));
704 PMD_DRV_LOG(ERR, "Inner type is not supported"
709 wr32m(hw, TXGBE_TAGTPID(0),
710 TXGBE_TAGTPID_LSB_MASK,
711 TXGBE_TAGTPID_LSB(tpid));
714 case ETH_VLAN_TYPE_OUTER:
716 /* Only the high 16 bits are valid */
717 wr32m(hw, TXGBE_EXTAG,
718 TXGBE_EXTAG_VLAN_MASK,
719 TXGBE_EXTAG_VLAN(tpid));
721 wr32m(hw, TXGBE_VLANCTL,
722 TXGBE_VLANCTL_TPID_MASK,
723 TXGBE_VLANCTL_TPID(tpid));
724 wr32m(hw, TXGBE_DMATXCTRL,
725 TXGBE_DMATXCTRL_TPID_MASK,
726 TXGBE_DMATXCTRL_TPID(tpid));
730 wr32m(hw, TXGBE_TAGTPID(0),
731 TXGBE_TAGTPID_MSB_MASK,
732 TXGBE_TAGTPID_MSB(tpid));
736 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
744 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
746 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
749 PMD_INIT_FUNC_TRACE();
751 /* Filter Table Disable */
752 vlnctrl = rd32(hw, TXGBE_VLANCTL);
753 vlnctrl &= ~TXGBE_VLANCTL_VFE;
754 wr32(hw, TXGBE_VLANCTL, vlnctrl);
758 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
760 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
761 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
765 PMD_INIT_FUNC_TRACE();
767 /* Filter Table Enable */
768 vlnctrl = rd32(hw, TXGBE_VLANCTL);
769 vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
770 vlnctrl |= TXGBE_VLANCTL_VFE;
771 wr32(hw, TXGBE_VLANCTL, vlnctrl);
773 /* write whatever is in local vfta copy */
774 for (i = 0; i < TXGBE_VFTA_SIZE; i++)
775 wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
779 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
781 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
782 struct txgbe_rx_queue *rxq;
784 if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
788 TXGBE_SET_HWSTRIP(hwstrip, queue);
790 TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
792 if (queue >= dev->data->nb_rx_queues)
795 rxq = dev->data->rx_queues[queue];
798 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
799 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
801 rxq->vlan_flags = PKT_RX_VLAN;
802 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
807 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
809 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
812 PMD_INIT_FUNC_TRACE();
814 ctrl = rd32(hw, TXGBE_RXCFG(queue));
815 ctrl &= ~TXGBE_RXCFG_VLAN;
816 wr32(hw, TXGBE_RXCFG(queue), ctrl);
818 /* record this setting in the per-queue HW strip bitmap */
819 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
823 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
825 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
828 PMD_INIT_FUNC_TRACE();
830 ctrl = rd32(hw, TXGBE_RXCFG(queue));
831 ctrl |= TXGBE_RXCFG_VLAN;
832 wr32(hw, TXGBE_RXCFG(queue), ctrl);
834 /* record this setting in the per-queue HW strip bitmap */
835 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
839 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
841 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
844 PMD_INIT_FUNC_TRACE();
846 ctrl = rd32(hw, TXGBE_PORTCTL);
847 ctrl &= ~TXGBE_PORTCTL_VLANEXT;
848 ctrl &= ~TXGBE_PORTCTL_QINQ;
849 wr32(hw, TXGBE_PORTCTL, ctrl);
853 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
855 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
856 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
857 struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
860 PMD_INIT_FUNC_TRACE();
862 ctrl = rd32(hw, TXGBE_PORTCTL);
863 ctrl |= TXGBE_PORTCTL_VLANEXT;
864 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
865 txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
866 ctrl |= TXGBE_PORTCTL_QINQ;
867 wr32(hw, TXGBE_PORTCTL, ctrl);
871 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
873 struct txgbe_rx_queue *rxq;
876 PMD_INIT_FUNC_TRACE();
878 for (i = 0; i < dev->data->nb_rx_queues; i++) {
879 rxq = dev->data->rx_queues[i];
881 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
882 txgbe_vlan_strip_queue_set(dev, i, 1);
884 txgbe_vlan_strip_queue_set(dev, i, 0);
889 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
892 struct rte_eth_rxmode *rxmode;
893 struct txgbe_rx_queue *rxq;
895 if (mask & ETH_VLAN_STRIP_MASK) {
896 rxmode = &dev->data->dev_conf.rxmode;
897 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
898 for (i = 0; i < dev->data->nb_rx_queues; i++) {
899 rxq = dev->data->rx_queues[i];
900 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
903 for (i = 0; i < dev->data->nb_rx_queues; i++) {
904 rxq = dev->data->rx_queues[i];
905 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
911 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
913 struct rte_eth_rxmode *rxmode;
914 rxmode = &dev->data->dev_conf.rxmode;
916 if (mask & ETH_VLAN_STRIP_MASK)
917 txgbe_vlan_hw_strip_config(dev);
919 if (mask & ETH_VLAN_FILTER_MASK) {
920 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
921 txgbe_vlan_hw_filter_enable(dev);
923 txgbe_vlan_hw_filter_disable(dev);
926 if (mask & ETH_VLAN_EXTEND_MASK) {
927 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
928 txgbe_vlan_hw_extend_enable(dev);
930 txgbe_vlan_hw_extend_disable(dev);
937 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
939 txgbe_config_vlan_strip_on_all_queues(dev, mask);
941 txgbe_vlan_offload_config(dev, mask);
947 txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
949 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
950 /* VLANCTL: enable vlan filtering and allow all vlan tags through */
951 uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
953 vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
954 wr32(hw, TXGBE_VLANCTL, vlanctrl);
958 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
960 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
965 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
968 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
974 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
975 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
976 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
977 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
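/*
 * Illustrative example (not part of the driver): assuming
 * TXGBE_MAX_RX_QUEUE_NUM == 128, with 64 active pools each pool owns
 * 128 / 64 = 2 queues, and with max_vfs == 4 the PF's default queues
 * start at index 4 * 2 = 8.
 */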
982 txgbe_check_mq_mode(struct rte_eth_dev *dev)
984 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
985 uint16_t nb_rx_q = dev->data->nb_rx_queues;
986 uint16_t nb_tx_q = dev->data->nb_tx_queues;
988 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
989 /* check multi-queue mode */
990 switch (dev_conf->rxmode.mq_mode) {
991 case ETH_MQ_RX_VMDQ_DCB:
992 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
994 case ETH_MQ_RX_VMDQ_DCB_RSS:
995 /* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
996 PMD_INIT_LOG(ERR, "SRIOV active,"
997 " unsupported mq_mode rx %d.",
998 dev_conf->rxmode.mq_mode);
1001 case ETH_MQ_RX_VMDQ_RSS:
1002 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1003 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1004 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1005 PMD_INIT_LOG(ERR, "SRIOV is active,"
1006 " invalid queue number"
1007 " for VMDQ RSS, allowed"
1008 " value are 1, 2 or 4.");
1012 case ETH_MQ_RX_VMDQ_ONLY:
1013 case ETH_MQ_RX_NONE:
1014 /* if no mq mode is configured, use the default scheme */
1015 dev->data->dev_conf.rxmode.mq_mode =
1016 ETH_MQ_RX_VMDQ_ONLY;
1018 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1019 /* SRIOV only works in VMDq enable mode */
1020 PMD_INIT_LOG(ERR, "SRIOV is active,"
1021 " wrong mq_mode rx %d.",
1022 dev_conf->rxmode.mq_mode);
1026 switch (dev_conf->txmode.mq_mode) {
1027 case ETH_MQ_TX_VMDQ_DCB:
1028 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1029 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1031 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1032 dev->data->dev_conf.txmode.mq_mode =
1033 ETH_MQ_TX_VMDQ_ONLY;
1037 /* check valid queue number */
1038 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1039 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1040 PMD_INIT_LOG(ERR, "SRIOV is active,"
1041 " nb_rx_q=%d nb_tx_q=%d queue number"
1042 " must be less than or equal to %d.",
1044 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1048 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1049 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1053 /* check configuration for vmdq+dcb mode */
1054 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1055 const struct rte_eth_vmdq_dcb_conf *conf;
1057 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1058 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1059 TXGBE_VMDQ_DCB_NB_QUEUES);
1062 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1063 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1064 conf->nb_queue_pools == ETH_32_POOLS)) {
1065 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1066 " nb_queue_pools must be %d or %d.",
1067 ETH_16_POOLS, ETH_32_POOLS);
1071 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1072 const struct rte_eth_vmdq_dcb_tx_conf *conf;
1074 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1075 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1076 TXGBE_VMDQ_DCB_NB_QUEUES);
1079 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1080 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1081 conf->nb_queue_pools == ETH_32_POOLS)) {
1082 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1083 " nb_queue_pools != %d and"
1084 " nb_queue_pools != %d.",
1085 ETH_16_POOLS, ETH_32_POOLS);
1090 /* For DCB mode check our configuration before we go further */
1091 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1092 const struct rte_eth_dcb_rx_conf *conf;
1094 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1095 if (!(conf->nb_tcs == ETH_4_TCS ||
1096 conf->nb_tcs == ETH_8_TCS)) {
1097 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1098 " and nb_tcs != %d.",
1099 ETH_4_TCS, ETH_8_TCS);
1104 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1105 const struct rte_eth_dcb_tx_conf *conf;
1107 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1108 if (!(conf->nb_tcs == ETH_4_TCS ||
1109 conf->nb_tcs == ETH_8_TCS)) {
1110 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1111 " and nb_tcs != %d.",
1112 ETH_4_TCS, ETH_8_TCS);
1121 txgbe_dev_configure(struct rte_eth_dev *dev)
1123 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1124 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1127 PMD_INIT_FUNC_TRACE();
1129 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1130 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1132 /* multiple queue mode checking */
1133 ret = txgbe_check_mq_mode(dev);
1135 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1140 /* set flag to update link status after init */
1141 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1144 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
1145 * allocation preconditions, we will reset it.
1147 adapter->rx_bulk_alloc_allowed = true;
1153 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1155 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1156 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1159 gpie = rd32(hw, TXGBE_GPIOINTEN);
1160 gpie |= TXGBE_GPIOBIT_6;
1161 wr32(hw, TXGBE_GPIOINTEN, gpie);
1162 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1166 txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1167 uint16_t tx_rate, uint64_t q_msk)
1169 struct txgbe_hw *hw;
1170 struct txgbe_vf_info *vfinfo;
1171 struct rte_eth_link link;
1172 uint8_t nb_q_per_pool;
1173 uint32_t queue_stride;
1174 uint32_t queue_idx, idx = 0, vf_idx;
1176 uint16_t total_rate = 0;
1177 struct rte_pci_device *pci_dev;
1180 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1181 ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1185 if (vf >= pci_dev->max_vfs)
1188 if (tx_rate > link.link_speed)
1194 hw = TXGBE_DEV_HW(dev);
1195 vfinfo = *(TXGBE_DEV_VFDATA(dev));
1196 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1197 queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1198 queue_idx = vf * queue_stride;
1199 queue_end = queue_idx + nb_q_per_pool - 1;
1200 if (queue_end >= hw->mac.max_tx_queues)
1204 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1207 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1209 total_rate += vfinfo[vf_idx].tx_rate[idx];
1215 /* Store tx_rate for this vf. */
1216 for (idx = 0; idx < nb_q_per_pool; idx++) {
1217 if (((uint64_t)0x1 << idx) & q_msk) {
1218 if (vfinfo[vf].tx_rate[idx] != tx_rate)
1219 vfinfo[vf].tx_rate[idx] = tx_rate;
1220 total_rate += tx_rate;
1224 if (total_rate > dev->data->dev_link.link_speed) {
1225 /* Reset the stored TX rate of the VF if the total would exceed
1228 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1232 /* Set ARBTXRATE of each queue/pool for vf X */
1233 for (; queue_idx <= queue_end; queue_idx++) {
1235 txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
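/*
 * Illustrative example (not part of the driver): with nb_q_per_pool == 2
 * and q_msk == 0x3, both queues of this VF's pool are programmed to
 * tx_rate, and total_rate accumulates 2 * tx_rate on top of the other
 * VFs' stored rates; the sum must stay within the current link speed for
 * the limit to be accepted.
 */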
1243 * Configure device link speed and setup link.
1244 * It returns 0 on success.
1247 txgbe_dev_start(struct rte_eth_dev *dev)
1249 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1250 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1251 struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1252 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1253 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1254 uint32_t intr_vector = 0;
1256 bool link_up = false, negotiate = 0;
1258 uint32_t allowed_speeds = 0;
1262 uint32_t *link_speeds;
1264 PMD_INIT_FUNC_TRACE();
1266 /* TXGBE devices don't support:
1267 * - half duplex (checked afterwards for valid speeds)
1268 * - fixed speed: TODO implement
1270 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1272 "Invalid link_speeds for port %u, fix speed not supported",
1273 dev->data->port_id);
1277 /* Stop the link setup handler before resetting the HW. */
1278 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1280 /* disable uio/vfio intr/eventfd mapping */
1281 rte_intr_disable(intr_handle);
1284 hw->adapter_stopped = 0;
1287 /* reinitialize adapter
1288 * this calls reset and start
1290 hw->nb_rx_queues = dev->data->nb_rx_queues;
1291 hw->nb_tx_queues = dev->data->nb_tx_queues;
1292 status = txgbe_pf_reset_hw(hw);
1295 hw->mac.start_hw(hw);
1296 hw->mac.get_link_status = true;
1298 /* configure PF module if SRIOV enabled */
1299 txgbe_pf_host_configure(dev);
1301 txgbe_dev_phy_intr_setup(dev);
1303 /* check and configure queue intr-vector mapping */
1304 if ((rte_intr_cap_multiple(intr_handle) ||
1305 !RTE_ETH_DEV_SRIOV(dev).active) &&
1306 dev->data->dev_conf.intr_conf.rxq != 0) {
1307 intr_vector = dev->data->nb_rx_queues;
1308 if (rte_intr_efd_enable(intr_handle, intr_vector))
1312 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1313 intr_handle->intr_vec =
1314 rte_zmalloc("intr_vec",
1315 dev->data->nb_rx_queues * sizeof(int), 0);
1316 if (intr_handle->intr_vec == NULL) {
1317 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1318 " intr_vec", dev->data->nb_rx_queues);
1323 /* configure MSI-X for sleep until Rx interrupt */
1324 txgbe_configure_msix(dev);
1326 /* initialize transmission unit */
1327 txgbe_dev_tx_init(dev);
1329 /* This can fail when allocating mbufs for descriptor rings */
1330 err = txgbe_dev_rx_init(dev);
1332 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1336 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1337 ETH_VLAN_EXTEND_MASK;
1338 err = txgbe_vlan_offload_config(dev, mask);
1340 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1344 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1345 /* Enable vlan filtering for VMDq */
1346 txgbe_vmdq_vlan_hw_filter_enable(dev);
1349 /* Restore vf rate limit */
1350 if (vfinfo != NULL) {
1351 for (vf = 0; vf < pci_dev->max_vfs; vf++)
1352 for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1353 if (vfinfo[vf].tx_rate[idx] != 0)
1354 txgbe_set_vf_rate_limit(dev, vf,
1355 vfinfo[vf].tx_rate[idx],
1359 err = txgbe_dev_rxtx_start(dev);
1361 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1365 /* Skip link setup if loopback mode is enabled. */
1366 if (hw->mac.type == txgbe_mac_raptor &&
1367 dev->data->dev_conf.lpbk_mode)
1368 goto skip_link_setup;
1370 if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1371 err = hw->mac.setup_sfp(hw);
1376 if (hw->phy.media_type == txgbe_media_type_copper) {
1377 /* Turn on the copper */
1378 hw->phy.set_phy_power(hw, true);
1380 /* Turn on the laser */
1381 hw->mac.enable_tx_laser(hw);
1384 err = hw->mac.check_link(hw, &speed, &link_up, 0);
1387 dev->data->dev_link.link_status = link_up;
1389 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1393 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1396 link_speeds = &dev->data->dev_conf.link_speeds;
1397 if (*link_speeds & ~allowed_speeds) {
1398 PMD_INIT_LOG(ERR, "Invalid link setting");
1403 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1404 speed = (TXGBE_LINK_SPEED_100M_FULL |
1405 TXGBE_LINK_SPEED_1GB_FULL |
1406 TXGBE_LINK_SPEED_10GB_FULL);
1408 if (*link_speeds & ETH_LINK_SPEED_10G)
1409 speed |= TXGBE_LINK_SPEED_10GB_FULL;
1410 if (*link_speeds & ETH_LINK_SPEED_5G)
1411 speed |= TXGBE_LINK_SPEED_5GB_FULL;
1412 if (*link_speeds & ETH_LINK_SPEED_2_5G)
1413 speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1414 if (*link_speeds & ETH_LINK_SPEED_1G)
1415 speed |= TXGBE_LINK_SPEED_1GB_FULL;
1416 if (*link_speeds & ETH_LINK_SPEED_100M)
1417 speed |= TXGBE_LINK_SPEED_100M_FULL;
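/*
 * Illustrative example: configuring
 * dev_conf.link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G
 * yields speed = TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL
 * for the setup_link() call below.
 */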
1420 err = hw->mac.setup_link(hw, speed, link_up);
1426 if (rte_intr_allow_others(intr_handle)) {
1427 /* check if lsc interrupt is enabled */
1428 if (dev->data->dev_conf.intr_conf.lsc != 0)
1429 txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1431 txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1432 txgbe_dev_macsec_interrupt_setup(dev);
1433 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1435 rte_intr_callback_unregister(intr_handle,
1436 txgbe_dev_interrupt_handler, dev);
1437 if (dev->data->dev_conf.intr_conf.lsc != 0)
1438 PMD_INIT_LOG(INFO, "lsc won't be enabled because of"
1439 " no intr multiplex");
1442 /* check if rxq interrupt is enabled */
1443 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1444 rte_intr_dp_is_en(intr_handle))
1445 txgbe_dev_rxq_interrupt_setup(dev);
1447 /* enable uio/vfio intr/eventfd mapping */
1448 rte_intr_enable(intr_handle);
1450 /* resume enabled intr since hw reset */
1451 txgbe_enable_intr(dev);
1454 * Update link status right before return, because it may
1455 * start the link configuration process in a separate thread.
1457 txgbe_dev_link_update(dev, 0);
1459 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1461 txgbe_read_stats_registers(hw, hw_stats);
1462 hw->offset_loaded = 1;
1467 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1468 txgbe_dev_clear_queues(dev);
1473 * Stop device: disable rx and tx functions to allow for reconfiguring.
1476 txgbe_dev_stop(struct rte_eth_dev *dev)
1478 struct rte_eth_link link;
1479 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1480 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1481 struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1482 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1483 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1486 if (hw->adapter_stopped)
1489 PMD_INIT_FUNC_TRACE();
1491 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1493 /* disable interrupts */
1494 txgbe_disable_intr(hw);
1497 txgbe_pf_reset_hw(hw);
1498 hw->adapter_stopped = 0;
1503 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1504 vfinfo[vf].clear_to_send = false;
1506 if (hw->phy.media_type == txgbe_media_type_copper) {
1507 /* Turn off the copper */
1508 hw->phy.set_phy_power(hw, false);
1510 /* Turn off the laser */
1511 hw->mac.disable_tx_laser(hw);
1514 txgbe_dev_clear_queues(dev);
1516 /* Clear stored conf */
1517 dev->data->scattered_rx = 0;
1520 /* Clear recorded link status */
1521 memset(&link, 0, sizeof(link));
1522 rte_eth_linkstatus_set(dev, &link);
1524 if (!rte_intr_allow_others(intr_handle))
1525 /* resume to the default handler */
1526 rte_intr_callback_register(intr_handle,
1527 txgbe_dev_interrupt_handler,
1530 /* Clean datapath event and queue/vec mapping */
1531 rte_intr_efd_disable(intr_handle);
1532 if (intr_handle->intr_vec != NULL) {
1533 rte_free(intr_handle->intr_vec);
1534 intr_handle->intr_vec = NULL;
1537 adapter->rss_reta_updated = 0;
1538 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1540 hw->adapter_stopped = true;
1541 dev->data->dev_started = 0;
1547 * Set device link up: enable tx.
1550 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1552 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1554 if (hw->phy.media_type == txgbe_media_type_copper) {
1555 /* Turn on the copper */
1556 hw->phy.set_phy_power(hw, true);
1558 /* Turn on the laser */
1559 hw->mac.enable_tx_laser(hw);
1560 txgbe_dev_link_update(dev, 0);
1567 * Set device link down: disable tx.
1570 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1572 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1574 if (hw->phy.media_type == txgbe_media_type_copper) {
1575 /* Turn off the copper */
1576 hw->phy.set_phy_power(hw, false);
1578 /* Turn off the laser */
1579 hw->mac.disable_tx_laser(hw);
1580 txgbe_dev_link_update(dev, 0);
1587 * Reset and stop device.
1590 txgbe_dev_close(struct rte_eth_dev *dev)
1592 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1593 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1594 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1598 PMD_INIT_FUNC_TRACE();
1600 txgbe_pf_reset_hw(hw);
1602 ret = txgbe_dev_stop(dev);
1604 txgbe_dev_free_queues(dev);
1606 /* reprogram the RAR[0] in case user changed it. */
1607 txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1609 /* Unlock any pending hardware semaphore */
1610 txgbe_swfw_lock_reset(hw);
1612 /* disable uio intr before callback unregister */
1613 rte_intr_disable(intr_handle);
1616 ret = rte_intr_callback_unregister(intr_handle,
1617 txgbe_dev_interrupt_handler, dev);
1618 if (ret >= 0 || ret == -ENOENT) {
1620 } else if (ret != -EAGAIN) {
1622 "intr callback unregister failed: %d",
1626 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1628 /* cancel the delay handler before remove dev */
1629 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1631 /* uninitialize PF if max_vfs not zero */
1632 txgbe_pf_host_uninit(dev);
1634 rte_free(dev->data->mac_addrs);
1635 dev->data->mac_addrs = NULL;
1637 rte_free(dev->data->hash_mac_addrs);
1638 dev->data->hash_mac_addrs = NULL;
1647 txgbe_dev_reset(struct rte_eth_dev *dev)
1651 /* When a DPDK PMD PF begins to reset the PF port, it should notify all
1652 * its VFs to make them align with it. The detailed notification
1653 * mechanism is PMD specific. For the txgbe PF, it is rather complex.
1654 * To avoid unexpected behavior in VF, currently reset of PF with
1655 * SR-IOV activation is not supported. It might be supported later.
1657 if (dev->data->sriov.active)
1660 ret = eth_txgbe_dev_uninit(dev);
1664 ret = eth_txgbe_dev_init(dev, NULL);
1669 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
1671 uint32_t current_counter = rd32(hw, reg); \
1672 if (current_counter < last_counter) \
1673 current_counter += 0x100000000LL; \
1674 if (!hw->offset_loaded) \
1675 last_counter = current_counter; \
1676 counter = current_counter - last_counter; \
1677 counter &= 0xFFFFFFFFLL; \
1680 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1682 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
1683 uint64_t current_counter_msb = rd32(hw, reg_msb); \
1684 uint64_t current_counter = (current_counter_msb << 32) | \
1685 current_counter_lsb; \
1686 if (current_counter < last_counter) \
1687 current_counter += 0x1000000000LL; \
1688 if (!hw->offset_loaded) \
1689 last_counter = current_counter; \
1690 counter = current_counter - last_counter; \
1691 counter &= 0xFFFFFFFFFLL; \
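/*
 * Illustrative example (not part of the driver): if a 36-bit counter last
 * read 0xFFFFFFF00 and now reads 0x100, the wrap correction adds
 * 0x1000000000, so the reported delta is 0x200 instead of a bogus
 * negative value truncated to 36 bits.
 */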
1695 txgbe_read_stats_registers(struct txgbe_hw *hw,
1696 struct txgbe_hw_stats *hw_stats)
1701 for (i = 0; i < hw->nb_rx_queues; i++) {
1702 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
1703 hw->qp_last[i].rx_qp_packets,
1704 hw_stats->qp[i].rx_qp_packets);
1705 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
1706 hw->qp_last[i].rx_qp_bytes,
1707 hw_stats->qp[i].rx_qp_bytes);
1708 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
1709 hw->qp_last[i].rx_qp_mc_packets,
1710 hw_stats->qp[i].rx_qp_mc_packets);
1713 for (i = 0; i < hw->nb_tx_queues; i++) {
1714 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
1715 hw->qp_last[i].tx_qp_packets,
1716 hw_stats->qp[i].tx_qp_packets);
1717 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
1718 hw->qp_last[i].tx_qp_bytes,
1719 hw_stats->qp[i].tx_qp_bytes);
1722 for (i = 0; i < TXGBE_MAX_UP; i++) {
1723 hw_stats->up[i].rx_up_xon_packets +=
1724 rd32(hw, TXGBE_PBRXUPXON(i));
1725 hw_stats->up[i].rx_up_xoff_packets +=
1726 rd32(hw, TXGBE_PBRXUPXOFF(i));
1727 hw_stats->up[i].tx_up_xon_packets +=
1728 rd32(hw, TXGBE_PBTXUPXON(i));
1729 hw_stats->up[i].tx_up_xoff_packets +=
1730 rd32(hw, TXGBE_PBTXUPXOFF(i));
1731 hw_stats->up[i].tx_up_xon2off_packets +=
1732 rd32(hw, TXGBE_PBTXUPOFF(i));
1733 hw_stats->up[i].rx_up_dropped +=
1734 rd32(hw, TXGBE_PBRXMISS(i));
1736 hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
1737 hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
1738 hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
1739 hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
1742 hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
1743 hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
1745 hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
1746 hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
1747 hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
1750 hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
1751 hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
1752 hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
1754 hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
1755 hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
1756 hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
1758 hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
1759 hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
1761 hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
1762 hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
1763 hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
1764 hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
1765 hw_stats->rx_size_512_to_1023_packets +=
1766 rd64(hw, TXGBE_MACRX512TO1023L);
1767 hw_stats->rx_size_1024_to_max_packets +=
1768 rd64(hw, TXGBE_MACRX1024TOMAXL);
1769 hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
1770 hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
1771 hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
1772 hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
1773 hw_stats->tx_size_512_to_1023_packets +=
1774 rd64(hw, TXGBE_MACTX512TO1023L);
1775 hw_stats->tx_size_1024_to_max_packets +=
1776 rd64(hw, TXGBE_MACTX1024TOMAXL);
1778 hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
1779 hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
1780 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
1783 hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
1784 hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
1785 hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
1786 hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
1789 hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
1790 hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
1791 hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
1792 hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
1793 hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
1794 hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
1795 hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
1797 /* Flow Director Stats */
1798 hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
1799 hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
1800 hw_stats->flow_director_added_filters +=
1801 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
1802 hw_stats->flow_director_removed_filters +=
1803 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
1804 hw_stats->flow_director_filter_add_errors +=
1805 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
1806 hw_stats->flow_director_filter_remove_errors +=
1807 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
1810 hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
1811 hw_stats->tx_macsec_pkts_encrypted +=
1812 rd32(hw, TXGBE_LSECTX_ENCPKT);
1813 hw_stats->tx_macsec_pkts_protected +=
1814 rd32(hw, TXGBE_LSECTX_PROTPKT);
1815 hw_stats->tx_macsec_octets_encrypted +=
1816 rd32(hw, TXGBE_LSECTX_ENCOCT);
1817 hw_stats->tx_macsec_octets_protected +=
1818 rd32(hw, TXGBE_LSECTX_PROTOCT);
1819 hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
1820 hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
1821 hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
1822 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
1823 hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
1824 hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
1825 hw_stats->rx_macsec_sc_pkts_unchecked +=
1826 rd32(hw, TXGBE_LSECRX_UNCHKPKT);
1827 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
1828 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
1829 for (i = 0; i < 2; i++) {
1830 hw_stats->rx_macsec_sa_pkts_ok +=
1831 rd32(hw, TXGBE_LSECRX_OKPKT(i));
1832 hw_stats->rx_macsec_sa_pkts_invalid +=
1833 rd32(hw, TXGBE_LSECRX_INVPKT(i));
1834 hw_stats->rx_macsec_sa_pkts_notvalid +=
1835 rd32(hw, TXGBE_LSECRX_BADPKT(i));
1837 hw_stats->rx_macsec_sa_pkts_unusedsa +=
1838 rd32(hw, TXGBE_LSECRX_INVSAPKT);
1839 hw_stats->rx_macsec_sa_pkts_notusingsa +=
1840 rd32(hw, TXGBE_LSECRX_BADSAPKT);
1842 hw_stats->rx_total_missed_packets = 0;
1843 for (i = 0; i < TXGBE_MAX_UP; i++) {
1844 hw_stats->rx_total_missed_packets +=
1845 hw_stats->up[i].rx_up_dropped;
1850 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1852 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1853 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1854 struct txgbe_stat_mappings *stat_mappings =
1855 TXGBE_DEV_STAT_MAPPINGS(dev);
1858 txgbe_read_stats_registers(hw, hw_stats);
1863 /* Fill out the rte_eth_stats statistics structure */
1864 stats->ipackets = hw_stats->rx_packets;
1865 stats->ibytes = hw_stats->rx_bytes;
1866 stats->opackets = hw_stats->tx_packets;
1867 stats->obytes = hw_stats->tx_bytes;
1869 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1870 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1871 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1872 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1873 memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1874 for (i = 0; i < TXGBE_MAX_QP; i++) {
1875 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1876 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1879 q_map = (stat_mappings->rqsm[n] >> offset)
1880 & QMAP_FIELD_RESERVED_BITS_MASK;
1881 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1882 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1883 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1884 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1886 q_map = (stat_mappings->tqsm[n] >> offset)
1887 & QMAP_FIELD_RESERVED_BITS_MASK;
1888 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1889 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1890 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1891 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
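/*
 * Illustrative example: with the default all-zero mappings, q_map is 0
 * for every queue, so all per-queue hardware counters are folded into
 * the q_ipackets[0]/q_opackets[0] entries.
 */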
1895 stats->imissed = hw_stats->rx_total_missed_packets;
1896 stats->ierrors = hw_stats->rx_crc_errors +
1897 hw_stats->rx_mac_short_packet_dropped +
1898 hw_stats->rx_length_errors +
1899 hw_stats->rx_undersize_errors +
1900 hw_stats->rx_oversize_errors +
1901 hw_stats->rx_drop_packets +
1902 hw_stats->rx_illegal_byte_errors +
1903 hw_stats->rx_error_bytes +
1904 hw_stats->rx_fragment_errors +
1905 hw_stats->rx_fcoe_crc_errors +
1906 hw_stats->rx_fcoe_mbuf_allocation_errors;
1914 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
1916 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1917 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1919 /* HW registers are cleared on read */
1920 hw->offset_loaded = 0;
1921 txgbe_dev_stats_get(dev, NULL);
1922 hw->offset_loaded = 1;
1924 /* Reset software totals */
1925 memset(hw_stats, 0, sizeof(*hw_stats));
1930 /* This function calculates the number of xstats based on the current config */
1932 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
1934 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1935 return TXGBE_NB_HW_STATS +
1936 TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
1937 TXGBE_NB_QP_STATS * nb_queues;
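/*
 * Illustrative example: with 4 Rx and 2 Tx queues configured, nb_queues
 * is max(4, 2) = 4, so the xstats count is TXGBE_NB_HW_STATS +
 * TXGBE_NB_UP_STATS * TXGBE_MAX_UP + TXGBE_NB_QP_STATS * 4.
 */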
1941 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1945 /* Extended stats from txgbe_hw_stats */
1946 if (id < TXGBE_NB_HW_STATS) {
1947 snprintf(name, size, "[hw]%s",
1948 rte_txgbe_stats_strings[id].name);
1951 id -= TXGBE_NB_HW_STATS;
1953 /* Priority Stats */
1954 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1955 nb = id / TXGBE_NB_UP_STATS;
1956 st = id % TXGBE_NB_UP_STATS;
1957 snprintf(name, size, "[p%u]%s", nb,
1958 rte_txgbe_up_strings[st].name);
1961 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1964 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1965 nb = id / TXGBE_NB_QP_STATS;
1966 st = id % TXGBE_NB_QP_STATS;
1967 snprintf(name, size, "[q%u]%s", nb,
1968 rte_txgbe_qp_strings[st].name);
1971 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1973 return -(int)(id + 1);
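/*
 * Illustrative output (not part of the driver): ids resolve to names of
 * the form "[hw]rx_crc_errors", "[p0]rx_up_packets" and
 * "[q3]tx_qp_bytes", matching the snprintf formats above.
 */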
1977 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1981 /* Extended stats from txgbe_hw_stats */
1982 if (id < TXGBE_NB_HW_STATS) {
1983 *offset = rte_txgbe_stats_strings[id].offset;
1986 id -= TXGBE_NB_HW_STATS;
1988 /* Priority Stats */
1989 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1990 nb = id / TXGBE_NB_UP_STATS;
1991 st = id % TXGBE_NB_UP_STATS;
1992 *offset = rte_txgbe_up_strings[st].offset +
1993 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
1996 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1999 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2000 nb = id / TXGBE_NB_QP_STATS;
2001 st = id % TXGBE_NB_QP_STATS;
2002 *offset = rte_txgbe_qp_strings[st].offset +
2003 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2006 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2008 return -(int)(id + 1);
2011 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2012 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2014 unsigned int i, count;
2016 count = txgbe_xstats_calc_num(dev);
2017 if (xstats_names == NULL)
2020 /* Note: limit >= cnt_stats checked upstream
2021 * in rte_eth_xstats_names()
2023 limit = min(limit, count);
2025 /* Extended stats from txgbe_hw_stats */
2026 for (i = 0; i < limit; i++) {
2027 if (txgbe_get_name_by_id(i, xstats_names[i].name,
2028 sizeof(xstats_names[i].name))) {
2029 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2037 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2038 struct rte_eth_xstat_name *xstats_names,
2039 const uint64_t *ids,
2045 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2047 for (i = 0; i < limit; i++) {
2048 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2049 sizeof(xstats_names[i].name))) {
2050 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2059 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2062 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2063 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2064 unsigned int i, count;
2066 txgbe_read_stats_registers(hw, hw_stats);
2068 /* If this is a reset, xstats is NULL, and we have cleared the
2069 * registers by reading them.
2071 count = txgbe_xstats_calc_num(dev);
2075 limit = min(limit, txgbe_xstats_calc_num(dev));
2077 /* Extended stats from txgbe_hw_stats */
2078 for (i = 0; i < limit; i++) {
2079 uint32_t offset = 0;
2081 if (txgbe_get_offset_by_id(i, &offset)) {
2082 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2085 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2093 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2096 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2097 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2098 unsigned int i, count;
2100 txgbe_read_stats_registers(hw, hw_stats);
2102 /* If this is a reset, xstats is NULL, and we have cleared the
2103 * registers by reading them.
2105 count = txgbe_xstats_calc_num(dev);
2109 limit = min(limit, txgbe_xstats_calc_num(dev));
2111 /* Extended stats from txgbe_hw_stats */
2112 for (i = 0; i < limit; i++) {
2115 if (txgbe_get_offset_by_id(i, &offset)) {
2116 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2119 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2126 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2127 uint64_t *values, unsigned int limit)
2129 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2133 return txgbe_dev_xstats_get_(dev, values, limit);
2135 for (i = 0; i < limit; i++) {
2138 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2139 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2142 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2149 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2151 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2152 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2154 /* HW registers are cleared on read */
2155 hw->offset_loaded = 0;
2156 txgbe_read_stats_registers(hw, hw_stats);
2157 hw->offset_loaded = 1;
2159 /* Reset software totals */
2160 memset(hw_stats, 0, sizeof(*hw_stats));
static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}
static const uint32_t *
txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
		return txgbe_get_supported_ptypes();

	return NULL;
}
void
txgbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		hw->mac.get_link_capabilities(hw, &speed, &autoneg);

	hw->mac.setup_link(hw, speed, true);

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}
/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_link link;
	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	bool link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	hw->mac.get_link_status = true;

	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);

	if (err != 0) {
		link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		if (hw->phy.media_type == txgbe_media_type_fiber) {
			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				txgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
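/*
 * Note: rte_eal_alarm_set() takes microseconds, so a down fiber link above
 * schedules txgbe_dev_setup_link_alarm_handler() 10 us later instead of
 * blocking the caller while the link is reconfigured.
 */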
static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbe_dev_link_status_print(dev);
	if (on)
		intr->mask_misc |= TXGBE_ICRMISC_LSC;
	else
		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

	return 0;
}
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask[0] |= TXGBE_ICR_MASK;
	intr->mask[1] |= TXGBE_ICR_MASK;

	return 0;
}
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

	return 0;
}
/*
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}
/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			(int)(dev->data->port_id),
			(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			pci_dev->addr.domain,
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);
}
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
		struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX) {
		txgbe_pf_mbx_process(dev);
		intr->flags &= ~TXGBE_FLAG_MAILBOX;
	}

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for prediction later */
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* likely to up */
		if (!link.link_status)
			/* handle it 1 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}
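/*
 * Note on the handshake above: while the delayed handler is pending, only
 * the LSC bit is masked (the full mask is saved in mask_misc_orig), so
 * other causes keep firing; txgbe_dev_interrupt_delayed_handler() restores
 * the saved mask once the link state has settled.
 */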
/**
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable nic state. As the
 * NIC interrupt state is not stable for txgbe after link is just down,
 * it needs to wait 4 seconds to get the stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t eicr;

	txgbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	if (eicr & TXGBE_ICRMISC_VFMBX)
		txgbe_pf_mbx_process(dev);

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		txgbe_dev_link_update(dev, 0);
		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
		txgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & TXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~TXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbe_dev_interrupt_get_status(dev);
	txgbe_dev_interrupt_action(dev, dev->intr_handle);
}
static int
txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	if (!txgbe_rss_update_sp(hw->mac.type)) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1)) {
				reta &= ~(MS32(8 * j, 0xFF));
				reta |= LS32(reta_conf[idx].reta[shift + j],
						8 * j, 0xFF);
			}
		}
		wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
	}
	adapter->rss_reta_updated = 1;

	return 0;
}
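/*
 * Worked example (illustrative): entry i = 68 gives idx = 68 / 64 = 1 and
 * shift = 4, so the 4-bit mask is taken from bits [7:4] of
 * reta_conf[1].mask and the four entries land in RSSTBL word 68 >> 2 = 17,
 * one byte per entry.
 */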
static int
txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1))
				reta_conf[idx].reta[shift + j] =
					(uint16_t)RS32(reta, 8 * j, 0xFF);
		}
	}

	return 0;
}
static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t enable_addr = 1;

	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	txgbe_remove_rar(dev, 0);
	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}
static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
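/*
 * Worked example (illustrative): with mc_filter_type == 0 and a MAC address
 * ending in addr_bytes[4] = 0xAB, addr_bytes[5] = 0xCD, the hash is
 * vector = (0xAB >> 4) | (0xCD << 4) = 0xCDA, i.e. the 12 bits [47:36]
 * of the address.
 */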
static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;

	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on PF hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	vector = txgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
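/*
 * Continuing the example above: vector 0xCDA selects word
 * uta_idx = (0xCDA >> 5) & 0x7F = 0x66 and bit (0xCDA & 0x1F) = 26,
 * so the filter toggles bit 26 of TXGBE_UCADDRTBL(0x66).
 */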
static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	/* The UTA table only exists on PF hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, TXGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (on)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
static uint32_t
txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
{
	uint32_t new_val = orig_val;

	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
		new_val |= TXGBE_POOLETHCTL_UTA;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
		new_val |= TXGBE_POOLETHCTL_MCHA;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
		new_val |= TXGBE_POOLETHCTL_UCHA;
	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
		new_val |= TXGBE_POOLETHCTL_BCA;
	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
		new_val |= TXGBE_POOLETHCTL_MCP;

	return new_val;
}
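/*
 * Example: rx_mask = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST
 * returns orig_val with TXGBE_POOLETHCTL_UTA and TXGBE_POOLETHCTL_BCA set,
 * leaving all other pool filter bits untouched.
 */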
static int
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= (1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= (1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}
	rte_intr_enable(intr_handle);

	return 0;
}
static int
txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= ~(1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= ~(1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}

	return 0;
}
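/*
 * Example: queue_id 40 falls in the second interrupt mask register, so the
 * enable/disable paths above touch bit (40 - 32) = 8 of TXGBE_IMS(1);
 * queues 0..31 map 1:1 onto the bits of TXGBE_IMS(0).
 */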
/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}
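/*
 * Example (illustrative): for an Rx cause (direction == 0) on queue 5,
 * idx = 16 * (5 & 1) + 8 * 0 = 16, so the vector is written into bits
 * [23:16] of TXGBE_IVAR(5 >> 1) = TXGBE_IVAR(2).
 */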
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* Won't configure MSI-X register if no mapping is done
	 * between intr vector and event fd,
	 * but if MSI-X has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| TXGBE_ITR_WRDSA);
}
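/*
 * Resulting layout (sketch, assuming TXGBE_MISC_VEC_ID is vector 0 and
 * TXGBE_RX_VEC_START is vector 1): with four Rx queues and
 * rte_intr_allow_others() true, queues 0..3 are mapped 1:1 onto vectors
 * 1..4 (capped by nb_efd), while vector 0 stays reserved for misc causes.
 */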
int
txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
			   uint16_t queue_idx, uint16_t tx_rate)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t bcnrc_val;

	if (queue_idx >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (tx_rate != 0) {
		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
	} else {
		bcnrc_val = 0;
	}

	/*
	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(hw, TXGBE_ARBTXMMW, 0x14);

	/* Set ARBTXRATE of queue X */
	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
	txgbe_flush(hw);

	return 0;
}
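/*
 * Example (illustrative): tx_rate = 5000 (Mbps) programs
 * TXGBE_ARBTXRATE_MAX(5000) and TXGBE_ARBTXRATE_MIN(2500); passing
 * tx_rate = 0 clears the register and removes the limit.
 */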
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct txgbe_hw *hw;
	u8 *mc_addr_list;

	hw = TXGBE_DEV_HW(dev);
	mc_addr_list = (u8 *)mc_addr_set;
	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 txgbe_dev_addr_list_itr, TRUE);
}
bool
txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
{
	switch (mac_type) {
	case txgbe_mac_raptor:
		return 1;
	default:
		return 0;
	}
}
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure = txgbe_dev_configure,
	.dev_infos_get = txgbe_dev_info_get,
	.dev_start = txgbe_dev_start,
	.dev_stop = txgbe_dev_stop,
	.dev_set_link_up = txgbe_dev_set_link_up,
	.dev_set_link_down = txgbe_dev_set_link_down,
	.dev_close = txgbe_dev_close,
	.dev_reset = txgbe_dev_reset,
	.link_update = txgbe_dev_link_update,
	.stats_get = txgbe_dev_stats_get,
	.xstats_get = txgbe_dev_xstats_get,
	.xstats_get_by_id = txgbe_dev_xstats_get_by_id,
	.stats_reset = txgbe_dev_stats_reset,
	.xstats_reset = txgbe_dev_xstats_reset,
	.xstats_get_names = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set,
	.dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
	.vlan_filter_set = txgbe_vlan_filter_set,
	.vlan_tpid_set = txgbe_vlan_tpid_set,
	.vlan_offload_set = txgbe_vlan_offload_set,
	.vlan_strip_queue_set = txgbe_vlan_strip_queue_set,
	.rx_queue_start = txgbe_dev_rx_queue_start,
	.rx_queue_stop = txgbe_dev_rx_queue_stop,
	.tx_queue_start = txgbe_dev_tx_queue_start,
	.tx_queue_stop = txgbe_dev_tx_queue_stop,
	.rx_queue_setup = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release = txgbe_dev_rx_queue_release,
	.tx_queue_setup = txgbe_dev_tx_queue_setup,
	.tx_queue_release = txgbe_dev_tx_queue_release,
	.mac_addr_add = txgbe_add_rar,
	.mac_addr_remove = txgbe_remove_rar,
	.mac_addr_set = txgbe_set_default_mac_addr,
	.uc_hash_table_set = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
	.set_queue_rate_limit = txgbe_set_queue_rate_limit,
	.reta_update = txgbe_dev_rss_reta_update,
	.reta_query = txgbe_dev_rss_reta_query,
	.rss_hash_update = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
	.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
	.rxq_info_get = txgbe_rxq_info_get,
	.txq_info_get = txgbe_txq_info_get,
};
RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
	RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
	RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
	RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif