1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_interrupts.h>
14 #include <rte_debug.h>
16 #include <rte_memory.h>
18 #include <rte_alarm.h>
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
25 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
26 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
27 static int txgbe_dev_close(struct rte_eth_dev *dev);
28 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
29 int wait_to_complete);
30 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
31 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
32 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
35 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
36 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
37 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
38 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
39 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
40 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
41 struct rte_intr_handle *handle);
42 static void txgbe_dev_interrupt_handler(void *param);
43 static void txgbe_dev_interrupt_delayed_handler(void *param);
44 static void txgbe_configure_msix(struct rte_eth_dev *dev);
46 #define TXGBE_SET_HWSTRIP(h, q) do {\
47 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
48 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
49 (h)->bitmap[idx] |= 1 << bit;\
52 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
53 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
54 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
55 (h)->bitmap[idx] &= ~(1 << bit);\
58 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
59 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
60 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
61 (r) = (h)->bitmap[idx] >> bit & 1;\
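/*
 * Illustrative expansion (example values, not from the original source):
 * with a 32-bit bitmap word (sizeof((h)->bitmap[0]) * NBBY == 32),
 * queue 37 gives idx = 37 / 32 = 1 and bit = 37 % 32 = 5, so
 * TXGBE_SET_HWSTRIP(h, 37) sets bit 5 of h->bitmap[1] and
 * TXGBE_GET_HWSTRIP(h, 37, r) reads that same bit back into r.
 */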
65 * The set of PCI devices this driver supports
67 static const struct rte_pci_id pci_id_txgbe_map[] = {
68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
70 { .vendor_id = 0, /* sentinel */ },
73 static const struct rte_eth_desc_lim rx_desc_lim = {
74 .nb_max = TXGBE_RING_DESC_MAX,
75 .nb_min = TXGBE_RING_DESC_MIN,
76 .nb_align = TXGBE_RXD_ALIGN,
79 static const struct rte_eth_desc_lim tx_desc_lim = {
80 .nb_max = TXGBE_RING_DESC_MAX,
81 .nb_min = TXGBE_RING_DESC_MIN,
82 .nb_align = TXGBE_TXD_ALIGN,
83 .nb_seg_max = TXGBE_TX_MAX_SEG,
84 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
87 static const struct eth_dev_ops txgbe_eth_dev_ops;
89 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
90 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
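/*
 * Illustrative expansion (example entry, not part of the original table):
 * HW_XSTAT(rx_crc_errors) expands to
 *   {"rx_crc_errors", offsetof(struct txgbe_hw_stats, rx_crc_errors)}
 * and HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets") keeps
 * the same counter offset but exports it under the given display name.
 */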
91 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
93 HW_XSTAT(mng_bmc2host_packets),
94 HW_XSTAT(mng_host2bmc_packets),
100 HW_XSTAT(rx_total_bytes),
101 HW_XSTAT(rx_total_packets),
102 HW_XSTAT(tx_total_packets),
103 HW_XSTAT(rx_total_missed_packets),
104 HW_XSTAT(rx_broadcast_packets),
105 HW_XSTAT(rx_multicast_packets),
106 HW_XSTAT(rx_management_packets),
107 HW_XSTAT(tx_management_packets),
108 HW_XSTAT(rx_management_dropped),
111 HW_XSTAT(rx_crc_errors),
112 HW_XSTAT(rx_illegal_byte_errors),
113 HW_XSTAT(rx_error_bytes),
114 HW_XSTAT(rx_mac_short_packet_dropped),
115 HW_XSTAT(rx_length_errors),
116 HW_XSTAT(rx_undersize_errors),
117 HW_XSTAT(rx_fragment_errors),
118 HW_XSTAT(rx_oversize_errors),
119 HW_XSTAT(rx_jabber_errors),
120 HW_XSTAT(rx_l3_l4_xsum_error),
121 HW_XSTAT(mac_local_errors),
122 HW_XSTAT(mac_remote_errors),
125 HW_XSTAT(flow_director_added_filters),
126 HW_XSTAT(flow_director_removed_filters),
127 HW_XSTAT(flow_director_filter_add_errors),
128 HW_XSTAT(flow_director_filter_remove_errors),
129 HW_XSTAT(flow_director_matched_filters),
130 HW_XSTAT(flow_director_missed_filters),
133 HW_XSTAT(rx_fcoe_crc_errors),
134 HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
135 HW_XSTAT(rx_fcoe_dropped),
136 HW_XSTAT(rx_fcoe_packets),
137 HW_XSTAT(tx_fcoe_packets),
138 HW_XSTAT(rx_fcoe_bytes),
139 HW_XSTAT(tx_fcoe_bytes),
140 HW_XSTAT(rx_fcoe_no_ddp),
141 HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
144 HW_XSTAT(tx_macsec_pkts_untagged),
145 HW_XSTAT(tx_macsec_pkts_encrypted),
146 HW_XSTAT(tx_macsec_pkts_protected),
147 HW_XSTAT(tx_macsec_octets_encrypted),
148 HW_XSTAT(tx_macsec_octets_protected),
149 HW_XSTAT(rx_macsec_pkts_untagged),
150 HW_XSTAT(rx_macsec_pkts_badtag),
151 HW_XSTAT(rx_macsec_pkts_nosci),
152 HW_XSTAT(rx_macsec_pkts_unknownsci),
153 HW_XSTAT(rx_macsec_octets_decrypted),
154 HW_XSTAT(rx_macsec_octets_validated),
155 HW_XSTAT(rx_macsec_sc_pkts_unchecked),
156 HW_XSTAT(rx_macsec_sc_pkts_delayed),
157 HW_XSTAT(rx_macsec_sc_pkts_late),
158 HW_XSTAT(rx_macsec_sa_pkts_ok),
159 HW_XSTAT(rx_macsec_sa_pkts_invalid),
160 HW_XSTAT(rx_macsec_sa_pkts_notvalid),
161 HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
162 HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
165 HW_XSTAT(rx_size_64_packets),
166 HW_XSTAT(rx_size_65_to_127_packets),
167 HW_XSTAT(rx_size_128_to_255_packets),
168 HW_XSTAT(rx_size_256_to_511_packets),
169 HW_XSTAT(rx_size_512_to_1023_packets),
170 HW_XSTAT(rx_size_1024_to_max_packets),
171 HW_XSTAT(tx_size_64_packets),
172 HW_XSTAT(tx_size_65_to_127_packets),
173 HW_XSTAT(tx_size_128_to_255_packets),
174 HW_XSTAT(tx_size_256_to_511_packets),
175 HW_XSTAT(tx_size_512_to_1023_packets),
176 HW_XSTAT(tx_size_1024_to_max_packets),
179 HW_XSTAT(tx_xon_packets),
180 HW_XSTAT(rx_xon_packets),
181 HW_XSTAT(tx_xoff_packets),
182 HW_XSTAT(rx_xoff_packets),
184 HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
185 HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
186 HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
187 HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
190 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
191 sizeof(rte_txgbe_stats_strings[0]))
193 /* Per-priority statistics */
194 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
195 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
196 UP_XSTAT(rx_up_packets),
197 UP_XSTAT(tx_up_packets),
198 UP_XSTAT(rx_up_bytes),
199 UP_XSTAT(tx_up_bytes),
200 UP_XSTAT(rx_up_drop_packets),
202 UP_XSTAT(tx_up_xon_packets),
203 UP_XSTAT(rx_up_xon_packets),
204 UP_XSTAT(tx_up_xoff_packets),
205 UP_XSTAT(rx_up_xoff_packets),
206 UP_XSTAT(rx_up_dropped),
207 UP_XSTAT(rx_up_mbuf_alloc_errors),
208 UP_XSTAT(tx_up_xon2off_packets),
211 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
212 sizeof(rte_txgbe_up_strings[0]))
214 /* Per-queue statistics */
215 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
216 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
217 QP_XSTAT(rx_qp_packets),
218 QP_XSTAT(tx_qp_packets),
219 QP_XSTAT(rx_qp_bytes),
220 QP_XSTAT(tx_qp_bytes),
221 QP_XSTAT(rx_qp_mc_packets),
224 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
225 sizeof(rte_txgbe_qp_strings[0]))
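/*
 * Layout note: the extended stats are exported in this order -- all
 * TXGBE_NB_HW_STATS device-wide counters first, then TXGBE_NB_UP_STATS
 * counters for each user priority, then TXGBE_NB_QP_STATS counters for
 * each queue pair (see txgbe_get_name_by_id()/txgbe_get_offset_by_id()).
 */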
228 txgbe_is_sfp(struct txgbe_hw *hw)
230 switch (hw->phy.type) {
231 case txgbe_phy_sfp_avago:
232 case txgbe_phy_sfp_ftl:
233 case txgbe_phy_sfp_intel:
234 case txgbe_phy_sfp_unknown:
235 case txgbe_phy_sfp_tyco_passive:
236 case txgbe_phy_sfp_unknown_passive:
243 static inline int32_t
244 txgbe_pf_reset_hw(struct txgbe_hw *hw)
249 status = hw->mac.reset_hw(hw);
251 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
252 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
253 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
254 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
257 if (status == TXGBE_ERR_SFP_NOT_PRESENT)
263 txgbe_enable_intr(struct rte_eth_dev *dev)
265 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
266 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
268 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
269 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
270 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
275 txgbe_disable_intr(struct txgbe_hw *hw)
277 PMD_INIT_FUNC_TRACE();
279 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
280 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
281 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
286 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
291 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
292 struct txgbe_stat_mappings *stat_mappings =
293 TXGBE_DEV_STAT_MAPPINGS(eth_dev);
294 uint32_t qsmr_mask = 0;
295 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
299 if (hw->mac.type != txgbe_mac_raptor)
302 if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
305 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
306 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
309 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
310 if (n >= TXGBE_NB_STAT_MAPPING) {
311 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
314 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
316 /* Now clear any previous stat_idx set */
317 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
319 stat_mappings->tqsm[n] &= ~clearing_mask;
321 stat_mappings->rqsm[n] &= ~clearing_mask;
323 q_map = (uint32_t)stat_idx;
324 q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
325 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
327 stat_mappings->tqsm[n] |= qsmr_mask;
329 stat_mappings->rqsm[n] |= qsmr_mask;
331 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
332 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
334 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
335 is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
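/*
 * Worked example (illustrative; assumes 8-bit QMAP fields, four per
 * 32-bit QSM register, as the shifts above suggest): mapping queue_id 5
 * to stat_idx 2 gives n = 5 / 4 = 1 and offset = 5 % 4 = 1, so bits 15:8
 * of RQSMR[1] (Rx) or TQSM[1] (Tx) are cleared and rewritten with 0x02.
 */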
340 * Ensure that all locks are released before first NVM or PHY access
343 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
348 * These are trickier since they are common to all ports; but the
349 * swfw_sync retries last long enough (1s) to be almost sure that if
350 * the lock cannot be taken it is due to an improper lock of the
353 mask = TXGBE_MNGSEM_SWPHY |
355 TXGBE_MNGSEM_SWFLASH;
356 if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
357 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
359 hw->mac.release_swfw_sync(hw, mask);
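/*
 * Intent of the sequence above (descriptive note, no extra driver logic):
 * try to take the shared semaphores in "mask" once and release them in
 * any case, so a lock left behind by a crashed application cannot block
 * the first NVM or PHY access made by this instance.
 */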
363 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
365 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
366 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
367 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
368 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
369 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
370 const struct rte_memzone *mz;
375 PMD_INIT_FUNC_TRACE();
377 eth_dev->dev_ops = &txgbe_eth_dev_ops;
378 eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
379 eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
380 eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
383 * For secondary processes, we don't initialise any further as primary
384 * has already done this work. Only check we don't need a different
385 * RX and TX function.
387 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
388 struct txgbe_tx_queue *txq;
389 /* TX queue function in primary, set by last queue initialized.
390 * Tx queue may not have been initialized by the primary process.
392 if (eth_dev->data->tx_queues) {
393 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
394 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
395 txgbe_set_tx_function(eth_dev, txq);
397 /* Use default TX function if we get here */
398 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
399 "Using default TX function.");
402 txgbe_set_rx_function(eth_dev);
407 rte_eth_copy_pci_info(eth_dev, pci_dev);
409 /* Vendor and Device ID need to be set before init of shared code */
410 hw->device_id = pci_dev->id.device_id;
411 hw->vendor_id = pci_dev->id.vendor_id;
412 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
413 hw->allow_unsupported_sfp = 1;
415 /* Reserve memory for interrupt status block */
416 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
417 16, TXGBE_ALIGN, SOCKET_ID_ANY);
421 hw->isb_dma = TMZ_PADDR(mz);
422 hw->isb_mem = TMZ_VADDR(mz);
424 /* Initialize the shared code (base driver) */
425 err = txgbe_init_shared_code(hw);
427 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
431 /* Unlock any pending hardware semaphore */
432 txgbe_swfw_lock_reset(hw);
434 err = hw->rom.init_params(hw);
436 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
440 /* Make sure we have a good EEPROM before we read from it */
441 err = hw->rom.validate_checksum(hw, &csum);
443 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
447 err = hw->mac.init_hw(hw);
450 * Devices with copper phys will fail to initialise if txgbe_init_hw()
451 * is called too soon after the kernel driver unbinding/binding occurs.
452 * The failure occurs in txgbe_identify_phy() for all devices,
453 * but for non-copper devices, txgbe_identify_sfp_module() is
454 * also called. See txgbe_identify_phy(). The reason for the
455 * failure is not known, and only occurs when virtualisation features
456 * are disabled in the BIOS. A delay of 200ms was found to be enough by
457 * trial-and-error, and is doubled to be safe.
459 if (err && hw->phy.media_type == txgbe_media_type_copper) {
461 err = hw->mac.init_hw(hw);
464 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
467 if (err == TXGBE_ERR_EEPROM_VERSION) {
468 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
469 "LOM. Please be aware there may be issues associated "
470 "with your hardware.");
471 PMD_INIT_LOG(ERR, "If you are experiencing problems "
472 "please contact your hardware representative "
473 "who provided you with this hardware.");
474 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
475 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
478 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
482 /* Reset the hw statistics */
483 txgbe_dev_stats_reset(eth_dev);
485 /* disable interrupt */
486 txgbe_disable_intr(hw);
488 /* Allocate memory for storing MAC addresses */
489 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
490 hw->mac.num_rar_entries, 0);
491 if (eth_dev->data->mac_addrs == NULL) {
493 "Failed to allocate %u bytes needed to store "
495 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
499 /* Copy the permanent MAC address */
500 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
501 &eth_dev->data->mac_addrs[0]);
503 /* Allocate memory for storing hash filter MAC addresses */
504 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
505 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
506 if (eth_dev->data->hash_mac_addrs == NULL) {
508 "Failed to allocate %d bytes needed to store MAC addresses",
509 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
513 /* initialize the vfta */
514 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
516 /* initialize the hw strip bitmap */
517 memset(hwstrip, 0, sizeof(*hwstrip));
519 /* initialize PF if max_vfs not zero */
520 txgbe_pf_host_init(eth_dev);
522 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
523 /* let hardware know driver is loaded */
524 ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
525 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
526 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
527 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
530 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
531 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
532 (int)hw->mac.type, (int)hw->phy.type,
533 (int)hw->phy.sfp_type);
535 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
536 (int)hw->mac.type, (int)hw->phy.type);
538 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
539 eth_dev->data->port_id, pci_dev->id.vendor_id,
540 pci_dev->id.device_id);
542 rte_intr_callback_register(intr_handle,
543 txgbe_dev_interrupt_handler, eth_dev);
545 /* enable uio/vfio intr/eventfd mapping */
546 rte_intr_enable(intr_handle);
548 /* enable support intr */
549 txgbe_enable_intr(eth_dev);
555 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
557 PMD_INIT_FUNC_TRACE();
559 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
562 txgbe_dev_close(eth_dev);
568 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
569 struct rte_pci_device *pci_dev)
571 struct rte_eth_dev *pf_ethdev;
572 struct rte_eth_devargs eth_da;
575 if (pci_dev->device.devargs) {
576 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
581 memset(&eth_da, 0, sizeof(eth_da));
584 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
585 sizeof(struct txgbe_adapter),
586 eth_dev_pci_specific_init, pci_dev,
587 eth_txgbe_dev_init, NULL);
589 if (retval || eth_da.nb_representor_ports < 1)
592 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
593 if (pf_ethdev == NULL)
599 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
601 struct rte_eth_dev *ethdev;
603 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
607 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
610 static struct rte_pci_driver rte_txgbe_pmd = {
611 .id_table = pci_id_txgbe_map,
612 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
613 RTE_PCI_DRV_INTR_LSC,
614 .probe = eth_txgbe_pci_probe,
615 .remove = eth_txgbe_pci_remove,
619 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
621 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
622 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
627 vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
628 vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
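/*
 * Illustrative example: vlan_id 100 yields vid_idx = (100 >> 5) & 0x7F
 * = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of the
 * fourth 32-bit entry of the VLAN filter table.
 */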
629 vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
634 wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
636 /* update local VFTA copy */
637 shadow_vfta->vfta[vid_idx] = vfta;
643 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
645 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
646 struct txgbe_rx_queue *rxq;
648 uint32_t rxcfg, rxbal, rxbah;
651 txgbe_vlan_hw_strip_enable(dev, queue);
653 txgbe_vlan_hw_strip_disable(dev, queue);
655 rxq = dev->data->rx_queues[queue];
656 rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
657 rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
658 rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
659 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
660 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
661 !(rxcfg & TXGBE_RXCFG_VLAN);
662 rxcfg |= TXGBE_RXCFG_VLAN;
664 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
665 (rxcfg & TXGBE_RXCFG_VLAN);
666 rxcfg &= ~TXGBE_RXCFG_VLAN;
668 rxcfg &= ~TXGBE_RXCFG_ENA;
671 /* set vlan strip for ring */
672 txgbe_dev_rx_queue_stop(dev, queue);
673 wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
674 wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
675 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
676 txgbe_dev_rx_queue_start(dev, queue);
681 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
682 enum rte_vlan_type vlan_type,
685 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
687 uint32_t portctrl, vlan_ext, qinq;
689 portctrl = rd32(hw, TXGBE_PORTCTL);
691 vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
692 qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
694 case ETH_VLAN_TYPE_INNER:
696 wr32m(hw, TXGBE_VLANCTL,
697 TXGBE_VLANCTL_TPID_MASK,
698 TXGBE_VLANCTL_TPID(tpid));
699 wr32m(hw, TXGBE_DMATXCTRL,
700 TXGBE_DMATXCTRL_TPID_MASK,
701 TXGBE_DMATXCTRL_TPID(tpid));
704 PMD_DRV_LOG(ERR, "Inner type is not supported"
709 wr32m(hw, TXGBE_TAGTPID(0),
710 TXGBE_TAGTPID_LSB_MASK,
711 TXGBE_TAGTPID_LSB(tpid));
714 case ETH_VLAN_TYPE_OUTER:
716 /* Only the high 16 bits are valid */
717 wr32m(hw, TXGBE_EXTAG,
718 TXGBE_EXTAG_VLAN_MASK,
719 TXGBE_EXTAG_VLAN(tpid));
721 wr32m(hw, TXGBE_VLANCTL,
722 TXGBE_VLANCTL_TPID_MASK,
723 TXGBE_VLANCTL_TPID(tpid));
724 wr32m(hw, TXGBE_DMATXCTRL,
725 TXGBE_DMATXCTRL_TPID_MASK,
726 TXGBE_DMATXCTRL_TPID(tpid));
730 wr32m(hw, TXGBE_TAGTPID(0),
731 TXGBE_TAGTPID_MSB_MASK,
732 TXGBE_TAGTPID_MSB(tpid));
736 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
744 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
746 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
749 PMD_INIT_FUNC_TRACE();
751 /* Filter Table Disable */
752 vlnctrl = rd32(hw, TXGBE_VLANCTL);
753 vlnctrl &= ~TXGBE_VLANCTL_VFE;
754 wr32(hw, TXGBE_VLANCTL, vlnctrl);
758 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
760 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
761 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
765 PMD_INIT_FUNC_TRACE();
767 /* Filter Table Enable */
768 vlnctrl = rd32(hw, TXGBE_VLANCTL);
769 vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
770 vlnctrl |= TXGBE_VLANCTL_VFE;
771 wr32(hw, TXGBE_VLANCTL, vlnctrl);
773 /* write whatever is in local vfta copy */
774 for (i = 0; i < TXGBE_VFTA_SIZE; i++)
775 wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
779 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
781 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
782 struct txgbe_rx_queue *rxq;
784 if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
788 TXGBE_SET_HWSTRIP(hwstrip, queue);
790 TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
792 if (queue >= dev->data->nb_rx_queues)
795 rxq = dev->data->rx_queues[queue];
798 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
799 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
801 rxq->vlan_flags = PKT_RX_VLAN;
802 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
807 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
809 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
812 PMD_INIT_FUNC_TRACE();
814 ctrl = rd32(hw, TXGBE_RXCFG(queue));
815 ctrl &= ~TXGBE_RXCFG_VLAN;
816 wr32(hw, TXGBE_RXCFG(queue), ctrl);
818 /* record this setting for HW strip per queue */
819 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
823 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
825 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
828 PMD_INIT_FUNC_TRACE();
830 ctrl = rd32(hw, TXGBE_RXCFG(queue));
831 ctrl |= TXGBE_RXCFG_VLAN;
832 wr32(hw, TXGBE_RXCFG(queue), ctrl);
834 /* record this setting for HW strip per queue */
835 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
839 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
841 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
844 PMD_INIT_FUNC_TRACE();
846 ctrl = rd32(hw, TXGBE_PORTCTL);
847 ctrl &= ~TXGBE_PORTCTL_VLANEXT;
848 ctrl &= ~TXGBE_PORTCTL_QINQ;
849 wr32(hw, TXGBE_PORTCTL, ctrl);
853 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
855 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
856 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
857 struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
860 PMD_INIT_FUNC_TRACE();
862 ctrl = rd32(hw, TXGBE_PORTCTL);
863 ctrl |= TXGBE_PORTCTL_VLANEXT;
864 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
865 txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
866 ctrl |= TXGBE_PORTCTL_QINQ;
867 wr32(hw, TXGBE_PORTCTL, ctrl);
871 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
873 struct txgbe_rx_queue *rxq;
876 PMD_INIT_FUNC_TRACE();
878 for (i = 0; i < dev->data->nb_rx_queues; i++) {
879 rxq = dev->data->rx_queues[i];
881 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
882 txgbe_vlan_strip_queue_set(dev, i, 1);
884 txgbe_vlan_strip_queue_set(dev, i, 0);
889 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
892 struct rte_eth_rxmode *rxmode;
893 struct txgbe_rx_queue *rxq;
895 if (mask & ETH_VLAN_STRIP_MASK) {
896 rxmode = &dev->data->dev_conf.rxmode;
897 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
898 for (i = 0; i < dev->data->nb_rx_queues; i++) {
899 rxq = dev->data->rx_queues[i];
900 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
903 for (i = 0; i < dev->data->nb_rx_queues; i++) {
904 rxq = dev->data->rx_queues[i];
905 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
911 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
913 struct rte_eth_rxmode *rxmode;
914 rxmode = &dev->data->dev_conf.rxmode;
916 if (mask & ETH_VLAN_STRIP_MASK)
917 txgbe_vlan_hw_strip_config(dev);
919 if (mask & ETH_VLAN_FILTER_MASK) {
920 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
921 txgbe_vlan_hw_filter_enable(dev);
923 txgbe_vlan_hw_filter_disable(dev);
926 if (mask & ETH_VLAN_EXTEND_MASK) {
927 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
928 txgbe_vlan_hw_extend_enable(dev);
930 txgbe_vlan_hw_extend_disable(dev);
937 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
939 txgbe_config_vlan_strip_on_all_queues(dev, mask);
941 txgbe_vlan_offload_config(dev, mask);
947 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
949 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
954 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
957 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
963 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
964 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
965 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
966 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
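/*
 * Illustrative example (assuming TXGBE_MAX_RX_QUEUE_NUM is 128): with
 * ETH_32_POOLS active, nb_q_per_pool = 128 / 32 = 4, and the PF's
 * default pool queues start right after the max_vfs * 4 queues that are
 * reserved for the VFs.
 */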
971 txgbe_check_mq_mode(struct rte_eth_dev *dev)
973 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
974 uint16_t nb_rx_q = dev->data->nb_rx_queues;
975 uint16_t nb_tx_q = dev->data->nb_tx_queues;
977 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
978 /* check multi-queue mode */
979 switch (dev_conf->rxmode.mq_mode) {
980 case ETH_MQ_RX_VMDQ_DCB:
981 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
983 case ETH_MQ_RX_VMDQ_DCB_RSS:
984 /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
985 PMD_INIT_LOG(ERR, "SRIOV active,"
986 " unsupported mq_mode rx %d.",
987 dev_conf->rxmode.mq_mode);
990 case ETH_MQ_RX_VMDQ_RSS:
991 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
992 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
993 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
994 PMD_INIT_LOG(ERR, "SRIOV is active,"
995 " invalid queue number"
996 " for VMDQ RSS, allowed"
997 " value are 1, 2 or 4.");
1001 case ETH_MQ_RX_VMDQ_ONLY:
1002 case ETH_MQ_RX_NONE:
1003 /* if no mq mode is configured, use the default scheme */
1004 dev->data->dev_conf.rxmode.mq_mode =
1005 ETH_MQ_RX_VMDQ_ONLY;
1007 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1008 /* SRIOV only works in VMDq enable mode */
1009 PMD_INIT_LOG(ERR, "SRIOV is active,"
1010 " wrong mq_mode rx %d.",
1011 dev_conf->rxmode.mq_mode);
1015 switch (dev_conf->txmode.mq_mode) {
1016 case ETH_MQ_TX_VMDQ_DCB:
1017 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1018 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1020 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1021 dev->data->dev_conf.txmode.mq_mode =
1022 ETH_MQ_TX_VMDQ_ONLY;
1026 /* check valid queue number */
1027 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1028 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1029 PMD_INIT_LOG(ERR, "SRIOV is active,"
1030 " nb_rx_q=%d nb_tx_q=%d queue number"
1031 " must be less than or equal to %d.",
1033 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1037 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1038 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1042 /* check configuration for vmdq+dcb mode */
1043 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1044 const struct rte_eth_vmdq_dcb_conf *conf;
1046 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1047 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1048 TXGBE_VMDQ_DCB_NB_QUEUES);
1051 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1052 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1053 conf->nb_queue_pools == ETH_32_POOLS)) {
1054 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1055 " nb_queue_pools must be %d or %d.",
1056 ETH_16_POOLS, ETH_32_POOLS);
1060 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1061 const struct rte_eth_vmdq_dcb_tx_conf *conf;
1063 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1064 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1065 TXGBE_VMDQ_DCB_NB_QUEUES);
1068 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1069 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1070 conf->nb_queue_pools == ETH_32_POOLS)) {
1071 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1072 " nb_queue_pools != %d and"
1073 " nb_queue_pools != %d.",
1074 ETH_16_POOLS, ETH_32_POOLS);
1079 /* For DCB mode check our configuration before we go further */
1080 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1081 const struct rte_eth_dcb_rx_conf *conf;
1083 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1084 if (!(conf->nb_tcs == ETH_4_TCS ||
1085 conf->nb_tcs == ETH_8_TCS)) {
1086 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1087 " and nb_tcs != %d.",
1088 ETH_4_TCS, ETH_8_TCS);
1093 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1094 const struct rte_eth_dcb_tx_conf *conf;
1096 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1097 if (!(conf->nb_tcs == ETH_4_TCS ||
1098 conf->nb_tcs == ETH_8_TCS)) {
1099 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1100 " and nb_tcs != %d.",
1101 ETH_4_TCS, ETH_8_TCS);
1110 txgbe_dev_configure(struct rte_eth_dev *dev)
1112 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1113 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1116 PMD_INIT_FUNC_TRACE();
1118 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1119 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1121 /* multiple queue mode checking */
1122 ret = txgbe_check_mq_mode(dev);
1124 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1129 /* set flag to update link status after init */
1130 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1133 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
1134 * allocation preconditions, we will reset it.
1136 adapter->rx_bulk_alloc_allowed = true;
1142 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1144 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1145 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1148 gpie = rd32(hw, TXGBE_GPIOINTEN);
1149 gpie |= TXGBE_GPIOBIT_6;
1150 wr32(hw, TXGBE_GPIOINTEN, gpie);
1151 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1155 * Configure device link speed and setup link.
1156 * It returns 0 on success.
1159 txgbe_dev_start(struct rte_eth_dev *dev)
1161 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1162 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1163 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1164 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1165 uint32_t intr_vector = 0;
1167 bool link_up = false, negotiate = 0;
1169 uint32_t allowed_speeds = 0;
1172 uint32_t *link_speeds;
1174 PMD_INIT_FUNC_TRACE();
1176 /* TXGBE devices don't support:
1177 * - half duplex (checked afterwards for valid speeds)
1178 * - fixed speed: TODO implement
1180 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1182 "Invalid link_speeds for port %u, fix speed not supported",
1183 dev->data->port_id);
1187 /* Stop the link setup handler before resetting the HW. */
1188 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1190 /* disable uio/vfio intr/eventfd mapping */
1191 rte_intr_disable(intr_handle);
1194 hw->adapter_stopped = 0;
1197 /* reinitialize adapter
1198 * this calls reset and start
1200 hw->nb_rx_queues = dev->data->nb_rx_queues;
1201 hw->nb_tx_queues = dev->data->nb_tx_queues;
1202 status = txgbe_pf_reset_hw(hw);
1205 hw->mac.start_hw(hw);
1206 hw->mac.get_link_status = true;
1208 txgbe_dev_phy_intr_setup(dev);
1210 /* check and configure queue intr-vector mapping */
1211 if ((rte_intr_cap_multiple(intr_handle) ||
1212 !RTE_ETH_DEV_SRIOV(dev).active) &&
1213 dev->data->dev_conf.intr_conf.rxq != 0) {
1214 intr_vector = dev->data->nb_rx_queues;
1215 if (rte_intr_efd_enable(intr_handle, intr_vector))
1219 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1220 intr_handle->intr_vec =
1221 rte_zmalloc("intr_vec",
1222 dev->data->nb_rx_queues * sizeof(int), 0);
1223 if (intr_handle->intr_vec == NULL) {
1224 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1225 " intr_vec", dev->data->nb_rx_queues);
1230 /* configure MSI-X for sleep until Rx interrupt */
1231 txgbe_configure_msix(dev);
1233 /* initialize transmission unit */
1234 txgbe_dev_tx_init(dev);
1236 /* This can fail when allocating mbufs for descriptor rings */
1237 err = txgbe_dev_rx_init(dev);
1239 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1243 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1244 ETH_VLAN_EXTEND_MASK;
1245 err = txgbe_vlan_offload_config(dev, mask);
1247 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1251 err = txgbe_dev_rxtx_start(dev);
1253 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1257 /* Skip link setup if loopback mode is enabled. */
1258 if (hw->mac.type == txgbe_mac_raptor &&
1259 dev->data->dev_conf.lpbk_mode)
1260 goto skip_link_setup;
1262 if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1263 err = hw->mac.setup_sfp(hw);
1268 if (hw->phy.media_type == txgbe_media_type_copper) {
1269 /* Turn on the copper */
1270 hw->phy.set_phy_power(hw, true);
1272 /* Turn on the laser */
1273 hw->mac.enable_tx_laser(hw);
1276 err = hw->mac.check_link(hw, &speed, &link_up, 0);
1279 dev->data->dev_link.link_status = link_up;
1281 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1285 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1288 link_speeds = &dev->data->dev_conf.link_speeds;
1289 if (*link_speeds & ~allowed_speeds) {
1290 PMD_INIT_LOG(ERR, "Invalid link setting");
1295 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1296 speed = (TXGBE_LINK_SPEED_100M_FULL |
1297 TXGBE_LINK_SPEED_1GB_FULL |
1298 TXGBE_LINK_SPEED_10GB_FULL);
1300 if (*link_speeds & ETH_LINK_SPEED_10G)
1301 speed |= TXGBE_LINK_SPEED_10GB_FULL;
1302 if (*link_speeds & ETH_LINK_SPEED_5G)
1303 speed |= TXGBE_LINK_SPEED_5GB_FULL;
1304 if (*link_speeds & ETH_LINK_SPEED_2_5G)
1305 speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1306 if (*link_speeds & ETH_LINK_SPEED_1G)
1307 speed |= TXGBE_LINK_SPEED_1GB_FULL;
1308 if (*link_speeds & ETH_LINK_SPEED_100M)
1309 speed |= TXGBE_LINK_SPEED_100M_FULL;
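/*
 * Illustrative example: link_speeds = ETH_LINK_SPEED_10G |
 * ETH_LINK_SPEED_1G selects a fixed set, so speed becomes
 * TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL, while
 * ETH_LINK_SPEED_AUTONEG advertises 100M/1G/10G full duplex as above.
 */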
1312 err = hw->mac.setup_link(hw, speed, link_up);
1318 if (rte_intr_allow_others(intr_handle)) {
1319 /* check if lsc interrupt is enabled */
1320 if (dev->data->dev_conf.intr_conf.lsc != 0)
1321 txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1323 txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1324 txgbe_dev_macsec_interrupt_setup(dev);
1325 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1327 rte_intr_callback_unregister(intr_handle,
1328 txgbe_dev_interrupt_handler, dev);
1329 if (dev->data->dev_conf.intr_conf.lsc != 0)
1330 PMD_INIT_LOG(INFO, "lsc interrupt won't be enabled because"
1331 " no intr multiplex is available");
1334 /* check if rxq interrupt is enabled */
1335 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1336 rte_intr_dp_is_en(intr_handle))
1337 txgbe_dev_rxq_interrupt_setup(dev);
1339 /* enable uio/vfio intr/eventfd mapping */
1340 rte_intr_enable(intr_handle);
1342 /* resume enabled intr since hw reset */
1343 txgbe_enable_intr(dev);
1346 * Update link status right before return, because it may
1347 * start link configuration process in a separate thread.
1349 txgbe_dev_link_update(dev, 0);
1351 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1353 txgbe_read_stats_registers(hw, hw_stats);
1354 hw->offset_loaded = 1;
1359 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1360 txgbe_dev_clear_queues(dev);
1365 * Stop device: disable rx and tx functions to allow for reconfiguring.
1368 txgbe_dev_stop(struct rte_eth_dev *dev)
1370 struct rte_eth_link link;
1371 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1372 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1373 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1375 if (hw->adapter_stopped)
1378 PMD_INIT_FUNC_TRACE();
1380 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1382 /* disable interrupts */
1383 txgbe_disable_intr(hw);
1386 txgbe_pf_reset_hw(hw);
1387 hw->adapter_stopped = 0;
1392 if (hw->phy.media_type == txgbe_media_type_copper) {
1393 /* Turn off the copper */
1394 hw->phy.set_phy_power(hw, false);
1396 /* Turn off the laser */
1397 hw->mac.disable_tx_laser(hw);
1400 txgbe_dev_clear_queues(dev);
1402 /* Clear stored conf */
1403 dev->data->scattered_rx = 0;
1406 /* Clear recorded link status */
1407 memset(&link, 0, sizeof(link));
1408 rte_eth_linkstatus_set(dev, &link);
1410 if (!rte_intr_allow_others(intr_handle))
1411 /* resume to the default handler */
1412 rte_intr_callback_register(intr_handle,
1413 txgbe_dev_interrupt_handler,
1416 /* Clean datapath event and queue/vec mapping */
1417 rte_intr_efd_disable(intr_handle);
1418 if (intr_handle->intr_vec != NULL) {
1419 rte_free(intr_handle->intr_vec);
1420 intr_handle->intr_vec = NULL;
1423 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1425 hw->adapter_stopped = true;
1426 dev->data->dev_started = 0;
1432 * Set device link up: enable tx.
1435 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1437 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1439 if (hw->phy.media_type == txgbe_media_type_copper) {
1440 /* Turn on the copper */
1441 hw->phy.set_phy_power(hw, true);
1443 /* Turn on the laser */
1444 hw->mac.enable_tx_laser(hw);
1445 txgbe_dev_link_update(dev, 0);
1452 * Set device link down: disable tx.
1455 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1457 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1459 if (hw->phy.media_type == txgbe_media_type_copper) {
1460 /* Turn off the copper */
1461 hw->phy.set_phy_power(hw, false);
1463 /* Turn off the laser */
1464 hw->mac.disable_tx_laser(hw);
1465 txgbe_dev_link_update(dev, 0);
1472 * Reset and stop device.
1475 txgbe_dev_close(struct rte_eth_dev *dev)
1477 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1478 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1479 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1483 PMD_INIT_FUNC_TRACE();
1485 txgbe_pf_reset_hw(hw);
1487 ret = txgbe_dev_stop(dev);
1489 txgbe_dev_free_queues(dev);
1491 /* reprogram the RAR[0] in case user changed it. */
1492 txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1494 /* Unlock any pending hardware semaphore */
1495 txgbe_swfw_lock_reset(hw);
1497 /* disable uio intr before callback unregister */
1498 rte_intr_disable(intr_handle);
1501 ret = rte_intr_callback_unregister(intr_handle,
1502 txgbe_dev_interrupt_handler, dev);
1503 if (ret >= 0 || ret == -ENOENT) {
1505 } else if (ret != -EAGAIN) {
1507 "intr callback unregister failed: %d",
1511 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1514 /* cancel the delayed handler before removing the dev */
1514 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1516 /* uninitialize PF if max_vfs not zero */
1517 txgbe_pf_host_uninit(dev);
1519 rte_free(dev->data->mac_addrs);
1520 dev->data->mac_addrs = NULL;
1522 rte_free(dev->data->hash_mac_addrs);
1523 dev->data->hash_mac_addrs = NULL;
1532 txgbe_dev_reset(struct rte_eth_dev *dev)
1536 /* When a DPDK PMD PF begins to reset the PF port, it should notify all
1537 * its VFs to make them align with it. The detailed notification
1538 * mechanism is PMD specific. For the txgbe PF, it is rather complex.
1539 * To avoid unexpected behavior in VF, currently reset of PF with
1540 * SR-IOV activation is not supported. It might be supported later.
1542 if (dev->data->sriov.active)
1545 ret = eth_txgbe_dev_uninit(dev);
1549 ret = eth_txgbe_dev_init(dev, NULL);
1554 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
1556 uint32_t current_counter = rd32(hw, reg); \
1557 if (current_counter < last_counter) \
1558 current_counter += 0x100000000LL; \
1559 if (!hw->offset_loaded) \
1560 last_counter = current_counter; \
1561 counter = current_counter - last_counter; \
1562 counter &= 0xFFFFFFFFLL; \
1565 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1567 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
1568 uint64_t current_counter_msb = rd32(hw, reg_msb); \
1569 uint64_t current_counter = (current_counter_msb << 32) | \
1570 current_counter_lsb; \
1571 if (current_counter < last_counter) \
1572 current_counter += 0x1000000000LL; \
1573 if (!hw->offset_loaded) \
1574 last_counter = current_counter; \
1575 counter = current_counter - last_counter; \
1576 counter &= 0xFFFFFFFFFLL; \
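/*
 * Wrap-around example for the 32-bit variant (illustrative values): if
 * the previous snapshot was 0xFFFFFFF0 and the register now reads
 * 0x00000010, the counter has wrapped, so 0x100000000 is added before
 * subtracting, giving a delta of 0x20. The 36-bit variant applies the
 * same idea to an LSB/MSB register pair.
 */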
1580 txgbe_read_stats_registers(struct txgbe_hw *hw,
1581 struct txgbe_hw_stats *hw_stats)
1586 for (i = 0; i < hw->nb_rx_queues; i++) {
1587 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
1588 hw->qp_last[i].rx_qp_packets,
1589 hw_stats->qp[i].rx_qp_packets);
1590 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
1591 hw->qp_last[i].rx_qp_bytes,
1592 hw_stats->qp[i].rx_qp_bytes);
1593 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
1594 hw->qp_last[i].rx_qp_mc_packets,
1595 hw_stats->qp[i].rx_qp_mc_packets);
1598 for (i = 0; i < hw->nb_tx_queues; i++) {
1599 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
1600 hw->qp_last[i].tx_qp_packets,
1601 hw_stats->qp[i].tx_qp_packets);
1602 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
1603 hw->qp_last[i].tx_qp_bytes,
1604 hw_stats->qp[i].tx_qp_bytes);
1607 for (i = 0; i < TXGBE_MAX_UP; i++) {
1608 hw_stats->up[i].rx_up_xon_packets +=
1609 rd32(hw, TXGBE_PBRXUPXON(i));
1610 hw_stats->up[i].rx_up_xoff_packets +=
1611 rd32(hw, TXGBE_PBRXUPXOFF(i));
1612 hw_stats->up[i].tx_up_xon_packets +=
1613 rd32(hw, TXGBE_PBTXUPXON(i));
1614 hw_stats->up[i].tx_up_xoff_packets +=
1615 rd32(hw, TXGBE_PBTXUPXOFF(i));
1616 hw_stats->up[i].tx_up_xon2off_packets +=
1617 rd32(hw, TXGBE_PBTXUPOFF(i));
1618 hw_stats->up[i].rx_up_dropped +=
1619 rd32(hw, TXGBE_PBRXMISS(i));
1621 hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
1622 hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
1623 hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
1624 hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
1627 hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
1628 hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
1630 hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
1631 hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
1632 hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
1635 hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
1636 hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
1637 hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
1639 hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
1640 hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
1641 hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
1643 hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
1644 hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
1646 hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
1647 hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
1648 hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
1649 hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
1650 hw_stats->rx_size_512_to_1023_packets +=
1651 rd64(hw, TXGBE_MACRX512TO1023L);
1652 hw_stats->rx_size_1024_to_max_packets +=
1653 rd64(hw, TXGBE_MACRX1024TOMAXL);
1654 hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
1655 hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
1656 hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
1657 hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
1658 hw_stats->tx_size_512_to_1023_packets +=
1659 rd64(hw, TXGBE_MACTX512TO1023L);
1660 hw_stats->tx_size_1024_to_max_packets +=
1661 rd64(hw, TXGBE_MACTX1024TOMAXL);
1663 hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
1664 hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
1665 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
1668 hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
1669 hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
1670 hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
1671 hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
1674 hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
1675 hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
1676 hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
1677 hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
1678 hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
1679 hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
1680 hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
1682 /* Flow Director Stats */
1683 hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
1684 hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
1685 hw_stats->flow_director_added_filters +=
1686 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
1687 hw_stats->flow_director_removed_filters +=
1688 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
1689 hw_stats->flow_director_filter_add_errors +=
1690 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
1691 hw_stats->flow_director_filter_remove_errors +=
1692 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
1695 hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
1696 hw_stats->tx_macsec_pkts_encrypted +=
1697 rd32(hw, TXGBE_LSECTX_ENCPKT);
1698 hw_stats->tx_macsec_pkts_protected +=
1699 rd32(hw, TXGBE_LSECTX_PROTPKT);
1700 hw_stats->tx_macsec_octets_encrypted +=
1701 rd32(hw, TXGBE_LSECTX_ENCOCT);
1702 hw_stats->tx_macsec_octets_protected +=
1703 rd32(hw, TXGBE_LSECTX_PROTOCT);
1704 hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
1705 hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
1706 hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
1707 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
1708 hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
1709 hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
1710 hw_stats->rx_macsec_sc_pkts_unchecked +=
1711 rd32(hw, TXGBE_LSECRX_UNCHKPKT);
1712 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
1713 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
1714 for (i = 0; i < 2; i++) {
1715 hw_stats->rx_macsec_sa_pkts_ok +=
1716 rd32(hw, TXGBE_LSECRX_OKPKT(i));
1717 hw_stats->rx_macsec_sa_pkts_invalid +=
1718 rd32(hw, TXGBE_LSECRX_INVPKT(i));
1719 hw_stats->rx_macsec_sa_pkts_notvalid +=
1720 rd32(hw, TXGBE_LSECRX_BADPKT(i));
1722 hw_stats->rx_macsec_sa_pkts_unusedsa +=
1723 rd32(hw, TXGBE_LSECRX_INVSAPKT);
1724 hw_stats->rx_macsec_sa_pkts_notusingsa +=
1725 rd32(hw, TXGBE_LSECRX_BADSAPKT);
1727 hw_stats->rx_total_missed_packets = 0;
1728 for (i = 0; i < TXGBE_MAX_UP; i++) {
1729 hw_stats->rx_total_missed_packets +=
1730 hw_stats->up[i].rx_up_dropped;
1735 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1737 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1738 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1739 struct txgbe_stat_mappings *stat_mappings =
1740 TXGBE_DEV_STAT_MAPPINGS(dev);
1743 txgbe_read_stats_registers(hw, hw_stats);
1748 /* Fill out the rte_eth_stats statistics structure */
1749 stats->ipackets = hw_stats->rx_packets;
1750 stats->ibytes = hw_stats->rx_bytes;
1751 stats->opackets = hw_stats->tx_packets;
1752 stats->obytes = hw_stats->tx_bytes;
1754 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1755 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1756 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1757 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1758 memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1759 for (i = 0; i < TXGBE_MAX_QP; i++) {
1760 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1761 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1764 q_map = (stat_mappings->rqsm[n] >> offset)
1765 & QMAP_FIELD_RESERVED_BITS_MASK;
1766 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1767 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1768 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1769 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1771 q_map = (stat_mappings->tqsm[n] >> offset)
1772 & QMAP_FIELD_RESERVED_BITS_MASK;
1773 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1774 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1775 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1776 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1780 stats->imissed = hw_stats->rx_total_missed_packets;
1781 stats->ierrors = hw_stats->rx_crc_errors +
1782 hw_stats->rx_mac_short_packet_dropped +
1783 hw_stats->rx_length_errors +
1784 hw_stats->rx_undersize_errors +
1785 hw_stats->rx_oversize_errors +
1786 hw_stats->rx_drop_packets +
1787 hw_stats->rx_illegal_byte_errors +
1788 hw_stats->rx_error_bytes +
1789 hw_stats->rx_fragment_errors +
1790 hw_stats->rx_fcoe_crc_errors +
1791 hw_stats->rx_fcoe_mbuf_allocation_errors;
1799 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
1801 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1802 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1804 /* HW registers are cleared on read */
1805 hw->offset_loaded = 0;
1806 txgbe_dev_stats_get(dev, NULL);
1807 hw->offset_loaded = 1;
1809 /* Reset software totals */
1810 memset(hw_stats, 0, sizeof(*hw_stats));
1815 /* This function calculates the number of xstats based on the current config */
1817 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
1819 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1820 return TXGBE_NB_HW_STATS +
1821 TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
1822 TXGBE_NB_QP_STATS * nb_queues;
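/*
 * Illustrative example: with 4 Rx and 2 Tx queues configured,
 * nb_queues = max(4, 2) = 4 and the total is TXGBE_NB_HW_STATS +
 * TXGBE_NB_UP_STATS * TXGBE_MAX_UP + TXGBE_NB_QP_STATS * 4.
 */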
1826 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1830 /* Extended stats from txgbe_hw_stats */
1831 if (id < TXGBE_NB_HW_STATS) {
1832 snprintf(name, size, "[hw]%s",
1833 rte_txgbe_stats_strings[id].name);
1836 id -= TXGBE_NB_HW_STATS;
1838 /* Priority Stats */
1839 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1840 nb = id / TXGBE_NB_UP_STATS;
1841 st = id % TXGBE_NB_UP_STATS;
1842 snprintf(name, size, "[p%u]%s", nb,
1843 rte_txgbe_up_strings[st].name);
1846 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1849 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1850 nb = id / TXGBE_NB_QP_STATS;
1851 st = id % TXGBE_NB_QP_STATS;
1852 snprintf(name, size, "[q%u]%s", nb,
1853 rte_txgbe_qp_strings[st].name);
1856 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1858 return -(int)(id + 1);
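/*
 * Names produced here look like "[hw]rx_crc_errors" for device-wide
 * counters, "[p0]rx_up_packets" for user priority 0 and "[q3]tx_qp_bytes"
 * for queue pair 3 (illustrative examples of the formats above).
 */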
1862 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1866 /* Extended stats from txgbe_hw_stats */
1867 if (id < TXGBE_NB_HW_STATS) {
1868 *offset = rte_txgbe_stats_strings[id].offset;
1871 id -= TXGBE_NB_HW_STATS;
1873 /* Priority Stats */
1874 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
1875 nb = id / TXGBE_NB_UP_STATS;
1876 st = id % TXGBE_NB_UP_STATS;
1877 *offset = rte_txgbe_up_strings[st].offset +
1878 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
1881 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
1884 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
1885 nb = id / TXGBE_NB_QP_STATS;
1886 st = id % TXGBE_NB_QP_STATS;
1887 *offset = rte_txgbe_qp_strings[st].offset +
1888 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
1891 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
1893 return -(int)(id + 1);
1896 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1897 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1899 unsigned int i, count;
1901 count = txgbe_xstats_calc_num(dev);
1902 if (xstats_names == NULL)
1905 /* Note: limit >= cnt_stats checked upstream
1906 * in rte_eth_xstats_names()
1908 limit = min(limit, count);
1910 /* Extended stats from txgbe_hw_stats */
1911 for (i = 0; i < limit; i++) {
1912 if (txgbe_get_name_by_id(i, xstats_names[i].name,
1913 sizeof(xstats_names[i].name))) {
1914 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1922 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1923 struct rte_eth_xstat_name *xstats_names,
1924 const uint64_t *ids,
1930 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
1932 for (i = 0; i < limit; i++) {
1933 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
1934 sizeof(xstats_names[i].name))) {
1935 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1944 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1947 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1948 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1949 unsigned int i, count;
1951 txgbe_read_stats_registers(hw, hw_stats);
1953 /* If this is a reset, xstats is NULL and we have cleared the
1954 * registers by reading them.
1956 count = txgbe_xstats_calc_num(dev);
1960 limit = min(limit, txgbe_xstats_calc_num(dev));
1962 /* Extended stats from txgbe_hw_stats */
1963 for (i = 0; i < limit; i++) {
1964 uint32_t offset = 0;
1966 if (txgbe_get_offset_by_id(i, &offset)) {
1967 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1970 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1978 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1981 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1982 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1983 unsigned int i, count;
1985 txgbe_read_stats_registers(hw, hw_stats);
1987 /* If this is a reset, xstats is NULL and we have cleared the
1988 * registers by reading them.
1990 count = txgbe_xstats_calc_num(dev);
1994 limit = min(limit, txgbe_xstats_calc_num(dev));
1996 /* Extended stats from txgbe_hw_stats */
1997 for (i = 0; i < limit; i++) {
2000 if (txgbe_get_offset_by_id(i, &offset)) {
2001 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2004 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2011 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2012 uint64_t *values, unsigned int limit)
2014 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2018 return txgbe_dev_xstats_get_(dev, values, limit);
2020 for (i = 0; i < limit; i++) {
2023 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2024 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2027 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2034 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2036 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2037 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2039 /* HW registers are cleared on read */
2040 hw->offset_loaded = 0;
2041 txgbe_read_stats_registers(hw, hw_stats);
2042 hw->offset_loaded = 1;
2044 /* Reset software totals */
2045 memset(hw_stats, 0, sizeof(*hw_stats));
2051 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2053 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2054 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2056 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2057 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2058 dev_info->min_rx_bufsize = 1024;
2059 dev_info->max_rx_pktlen = 15872;
2060 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2061 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2062 dev_info->max_vfs = pci_dev->max_vfs;
2063 dev_info->max_vmdq_pools = ETH_64_POOLS;
2064 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2065 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2066 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2067 dev_info->rx_queue_offload_capa);
2068 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2069 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2071 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2073 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2074 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2075 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2077 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2082 dev_info->default_txconf = (struct rte_eth_txconf) {
2084 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2085 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2086 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2088 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2092 dev_info->rx_desc_lim = rx_desc_lim;
2093 dev_info->tx_desc_lim = tx_desc_lim;
2095 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2096 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2097 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2099 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2100 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2102 /* Driver-preferred Rx/Tx parameters */
2103 dev_info->default_rxportconf.burst_size = 32;
2104 dev_info->default_txportconf.burst_size = 32;
2105 dev_info->default_rxportconf.nb_queues = 1;
2106 dev_info->default_txportconf.nb_queues = 1;
2107 dev_info->default_rxportconf.ring_size = 256;
2108 dev_info->default_txportconf.ring_size = 256;
2114 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2116 if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2117 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2118 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2119 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2120 return txgbe_get_supported_ptypes();
2126 txgbe_dev_setup_link_alarm_handler(void *param)
2128 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2129 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2130 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2132 bool autoneg = false;
2134 speed = hw->phy.autoneg_advertised;
2136 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2138 hw->mac.setup_link(hw, speed, true);
2140 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2143 /* return 0 if link status changed, -1 if not changed */
2145 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2146 int wait_to_complete)
2148 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2149 struct rte_eth_link link;
2150 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2151 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2156 memset(&link, 0, sizeof(link));
2157 link.link_status = ETH_LINK_DOWN;
2158 link.link_speed = ETH_SPEED_NUM_NONE;
2159 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2160 link.link_autoneg = ETH_LINK_AUTONEG;
2162 hw->mac.get_link_status = true;
2164 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2165 return rte_eth_linkstatus_set(dev, &link);
2167 /* check whether we need to wait for completion; no wait if the LSC interrupt is enabled */
2168 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2171 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2174 link.link_speed = ETH_SPEED_NUM_100M;
2175 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2176 return rte_eth_linkstatus_set(dev, &link);
2180 if (hw->phy.media_type == txgbe_media_type_fiber) {
2181 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2182 rte_eal_alarm_set(10,
2183 txgbe_dev_setup_link_alarm_handler, dev);
2185 return rte_eth_linkstatus_set(dev, &link);
2188 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2189 link.link_status = ETH_LINK_UP;
2190 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2192 switch (link_speed) {
2194 case TXGBE_LINK_SPEED_UNKNOWN:
2195 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2196 link.link_speed = ETH_SPEED_NUM_100M;
2199 case TXGBE_LINK_SPEED_100M_FULL:
2200 link.link_speed = ETH_SPEED_NUM_100M;
2203 case TXGBE_LINK_SPEED_1GB_FULL:
2204 link.link_speed = ETH_SPEED_NUM_1G;
2207 case TXGBE_LINK_SPEED_2_5GB_FULL:
2208 link.link_speed = ETH_SPEED_NUM_2_5G;
2211 case TXGBE_LINK_SPEED_5GB_FULL:
2212 link.link_speed = ETH_SPEED_NUM_5G;
2215 case TXGBE_LINK_SPEED_10GB_FULL:
2216 link.link_speed = ETH_SPEED_NUM_10G;
2220 return rte_eth_linkstatus_set(dev, &link);
2224 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2226 return txgbe_dev_link_update_share(dev, wait_to_complete);
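/*
 * Illustrative sketch: from the application side this update is driven by
 * the generic link helpers; rte_eth_link_get() maps to wait_to_complete = 1
 * and rte_eth_link_get_nowait() to 0 ("port_id" is a placeholder).
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0 && link.link_status)
 *		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */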
2230 * It clears the interrupt causes and enables the interrupt.
2231 * It will be called only once during NIC initialization.
2234 * Pointer to struct rte_eth_dev.
2236 * Enable or Disable.
2239 * - On success, zero.
2240 * - On failure, a negative value.
2243 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2245 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2247 txgbe_dev_link_status_print(dev);
2249 intr->mask_misc |= TXGBE_ICRMISC_LSC;
2251 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
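/*
 * Illustrative sketch: the mask above only matters when the application
 * enables LSC interrupts in its port configuration and registers a callback
 * for the event ("port_id", "nb_rxq", "nb_txq" and "on_link_change" are
 * placeholders).
 *
 *	struct rte_eth_conf conf = { .intr_conf = { .lsc = 1 } };
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      on_link_change, NULL);
 */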
2257 * It clears the interrupt causes and enables the interrupt.
2258 * It will be called only once during NIC initialization.
2261 * Pointer to struct rte_eth_dev.
2264 * - On success, zero.
2265 * - On failure, a negative value.
2268 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2270 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2272 intr->mask[0] |= TXGBE_ICR_MASK;
2273 intr->mask[1] |= TXGBE_ICR_MASK;
2279 * It clears the interrupt causes and enables the interrupt.
2280 * It will be called only once during NIC initialization.
2283 * Pointer to struct rte_eth_dev.
2286 * - On success, zero.
2287 * - On failure, a negative value.
2290 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2292 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2294 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2300 * It reads the ICR and sets the flag (TXGBE_ICRMISC_LSC) for link_update.
2303 * Pointer to struct rte_eth_dev.
2306 * - On success, zero.
2307 * - On failure, a negative value.
2310 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2313 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2314 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2316 /* clear all cause mask */
2317 txgbe_disable_intr(hw);
2319 /* read-on-clear nic registers here */
2320 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2321 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2325 /* set flag for async link update */
2326 if (eicr & TXGBE_ICRMISC_LSC)
2327 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2329 if (eicr & TXGBE_ICRMISC_VFMBX)
2330 intr->flags |= TXGBE_FLAG_MAILBOX;
2332 if (eicr & TXGBE_ICRMISC_LNKSEC)
2333 intr->flags |= TXGBE_FLAG_MACSEC;
2335 if (eicr & TXGBE_ICRMISC_GPIO)
2336 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2342 * It gets and then prints the link status.
2345 * Pointer to struct rte_eth_dev.
2348 * - On success, zero.
2349 * - On failure, a negative value.
2352 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2354 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2355 struct rte_eth_link link;
2357 rte_eth_linkstatus_get(dev, &link);
2359 if (link.link_status) {
2360 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2361 (int)(dev->data->port_id),
2362 (unsigned int)link.link_speed,
2363 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2364 "full-duplex" : "half-duplex");
2366 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2367 (int)(dev->data->port_id));
2369 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2370 pci_dev->addr.domain,
2372 pci_dev->addr.devid,
2373 pci_dev->addr.function);
2377 * It executes link_update after an interrupt has occurred.
2380 * Pointer to struct rte_eth_dev.
2383 * - On success, zero.
2384 * - On failure, a negative value.
2387 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2388 struct rte_intr_handle *intr_handle)
2390 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2392 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2394 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2396 if (intr->flags & TXGBE_FLAG_MAILBOX)
2397 intr->flags &= ~TXGBE_FLAG_MAILBOX;
2399 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2400 hw->phy.handle_lasi(hw);
2401 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2404 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2405 struct rte_eth_link link;
2407 /* get the link status before the link update, for predicting the change later */
2408 rte_eth_linkstatus_get(dev, &link);
2410 txgbe_dev_link_update(dev, 0);
2413 if (!link.link_status)
2414 /* handle it 1 sec later, waiting for it to become stable */
2415 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2416 /* likely to go down */
2418 /* handle it 4 sec later, waiting for it to become stable */
2419 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2421 txgbe_dev_link_status_print(dev);
2422 if (rte_eal_alarm_set(timeout * 1000,
2423 txgbe_dev_interrupt_delayed_handler,
2425 PMD_DRV_LOG(ERR, "Error setting alarm");
2427 /* remember original mask */
2428 intr->mask_misc_orig = intr->mask_misc;
2429 /* only disable lsc interrupt */
2430 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2434 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2435 txgbe_enable_intr(dev);
2436 rte_intr_enable(intr_handle);
2442 * Interrupt handler which shall be registered as an alarm callback for the
2443 * delayed handling of a specific interrupt, in order to wait for the NIC
2444 * state to become stable. Since the txgbe interrupt state is not stable
2445 * right after the link goes down, it needs to wait 4 seconds to get a stable status.
2448 * Pointer to interrupt handle.
2450 * The address of parameter (struct rte_eth_dev *) registered before.
2456 txgbe_dev_interrupt_delayed_handler(void *param)
2458 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2459 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2460 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2461 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2462 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2465 txgbe_disable_intr(hw);
2467 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2469 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2470 hw->phy.handle_lasi(hw);
2471 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2474 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2475 txgbe_dev_link_update(dev, 0);
2476 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
2477 txgbe_dev_link_status_print(dev);
2478 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2482 if (intr->flags & TXGBE_FLAG_MACSEC) {
2483 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2485 intr->flags &= ~TXGBE_FLAG_MACSEC;
2488 /* restore original mask */
2489 intr->mask_misc = intr->mask_misc_orig;
2490 intr->mask_misc_orig = 0;
2492 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2493 txgbe_enable_intr(dev);
2494 rte_intr_enable(intr_handle);
2498 * Interrupt handler triggered by the NIC for handling a
2499 * specific interrupt.
2502 * Pointer to interrupt handle.
2504 * The address of parameter (struct rte_eth_dev *) registered before.
2510 txgbe_dev_interrupt_handler(void *param)
2512 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2514 txgbe_dev_interrupt_get_status(dev);
2515 txgbe_dev_interrupt_action(dev, dev->intr_handle);
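/*
 * Illustrative sketch (assumed to happen during device init, outside this
 * excerpt): the handler above is hooked to the PCI device interrupt with
 * the EAL helpers, so the EAL interrupt thread invokes it on each event.
 *
 *	rte_intr_callback_register(intr_handle,
 *				   txgbe_dev_interrupt_handler, dev);
 *	rte_intr_enable(intr_handle);
 */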
2519 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2520 uint32_t index, uint32_t pool)
2522 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2523 uint32_t enable_addr = 1;
2525 return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
2530 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2532 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2534 txgbe_clear_rar(hw, index);
2538 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2540 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2542 txgbe_remove_rar(dev, 0);
2543 txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2549 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
2551 uint32_t vector = 0;
2553 switch (hw->mac.mc_filter_type) {
2554 case 0: /* use bits [47:36] of the address */
2555 vector = ((uc_addr->addr_bytes[4] >> 4) |
2556 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2558 case 1: /* use bits [46:35] of the address */
2559 vector = ((uc_addr->addr_bytes[4] >> 3) |
2560 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2562 case 2: /* use bits [45:34] of the address */
2563 vector = ((uc_addr->addr_bytes[4] >> 2) |
2564 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2566 case 3: /* use bits [43:32] of the address */
2567 vector = ((uc_addr->addr_bytes[4]) |
2568 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2570 default: /* Invalid mc_filter_type */
2574 /* the vector can only be 12 bits wide, or the table boundary will be exceeded */
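/*
 * Worked example (illustrative): with mc_filter_type 0 and the address
 * 00:11:22:33:44:55, addr_bytes[4] = 0x44 and addr_bytes[5] = 0x55, so
 * vector = (0x44 >> 4) | (0x55 << 4) = 0x004 | 0x550 = 0x554, which stays
 * within the 12-bit limit noted above.
 */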
2580 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
2581 struct rte_ether_addr *mac_addr, uint8_t on)
2589 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2590 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
2592 /* The UTA table only exists on pf hardware */
2593 if (hw->mac.type < txgbe_mac_raptor)
2596 vector = txgbe_uta_vector(hw, mac_addr);
2597 uta_idx = (vector >> 5) & 0x7F;
2598 uta_mask = 0x1UL << (vector & 0x1F);
2600 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2603 reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
2605 uta_info->uta_in_use++;
2606 reg_val |= uta_mask;
2607 uta_info->uta_shadow[uta_idx] |= uta_mask;
2609 uta_info->uta_in_use--;
2610 reg_val &= ~uta_mask;
2611 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2614 wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
2616 psrctl = rd32(hw, TXGBE_PSRCTL);
2617 if (uta_info->uta_in_use > 0)
2618 psrctl |= TXGBE_PSRCTL_UCHFENA;
2620 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
2622 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
2623 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2624 wr32(hw, TXGBE_PSRCTL, psrctl);
2630 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2632 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2633 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
2637 /* The UTA table only exists on pf hardware */
2638 if (hw->mac.type < txgbe_mac_raptor)
2642 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2643 uta_info->uta_shadow[i] = ~0;
2644 wr32(hw, TXGBE_UCADDRTBL(i), ~0);
2647 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2648 uta_info->uta_shadow[i] = 0;
2649 wr32(hw, TXGBE_UCADDRTBL(i), 0);
2653 psrctl = rd32(hw, TXGBE_PSRCTL);
2655 psrctl |= TXGBE_PSRCTL_UCHFENA;
2657 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
2659 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
2660 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2661 wr32(hw, TXGBE_PSRCTL, psrctl);
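/*
 * Illustrative sketch: both hash-table helpers above are reachable through
 * the generic ethdev API ("port_id" and "addr" are placeholders).
 *
 *	rte_eth_dev_uc_hash_table_set(port_id, &addr, 1);
 *	rte_eth_dev_uc_all_hash_table_set(port_id, 1);
 */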
2667 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2669 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2670 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2672 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2674 if (queue_id < 32) {
2675 mask = rd32(hw, TXGBE_IMS(0));
2676 mask &= (1 << queue_id);
2677 wr32(hw, TXGBE_IMS(0), mask);
2678 } else if (queue_id < 64) {
2679 mask = rd32(hw, TXGBE_IMS(1));
2680 mask &= (1 << (queue_id - 32));
2681 wr32(hw, TXGBE_IMS(1), mask);
2683 rte_intr_enable(intr_handle);
2689 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2692 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2694 if (queue_id < 32) {
2695 mask = rd32(hw, TXGBE_IMS(0));
2696 mask &= ~(1 << queue_id);
2697 wr32(hw, TXGBE_IMS(0), mask);
2698 } else if (queue_id < 64) {
2699 mask = rd32(hw, TXGBE_IMS(1));
2700 mask &= ~(1 << (queue_id - 32));
2701 wr32(hw, TXGBE_IMS(1), mask);
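/*
 * Illustrative sketch: an application arms the per-queue vector before
 * sleeping on the interrupt event and disarms it when it resumes polling
 * ("port_id" and "queue_id" are placeholders; the wait itself would use
 * e.g. rte_epoll_wait()).
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	... wait for the Rx interrupt event ...
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */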
2708 * set the IVAR registers, mapping interrupt causes to vectors
2710 * pointer to txgbe_hw struct
2712 * 0 for Rx, 1 for Tx, -1 for other causes
2714 * queue to map the corresponding interrupt to
2716 * the vector to map to the corresponding queue
2719 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
2720 uint8_t queue, uint8_t msix_vector)
2724 if (direction == -1) {
2726 msix_vector |= TXGBE_IVARMISC_VLD;
2728 tmp = rd32(hw, TXGBE_IVARMISC);
2729 tmp &= ~(0xFF << idx);
2730 tmp |= (msix_vector << idx);
2731 wr32(hw, TXGBE_IVARMISC, tmp);
2733 /* rx or tx causes */
2734 /* Workaround for lost ICR */
2735 idx = ((16 * (queue & 1)) + (8 * direction));
2736 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
2737 tmp &= ~(0xFF << idx);
2738 tmp |= (msix_vector << idx);
2739 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
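/*
 * Worked example (illustrative): mapping Rx queue 5 (direction 0) to an
 * MSI-X vector gives idx = 16 * (5 & 1) + 8 * 0 = 16, so bits [23:16] of
 * TXGBE_IVAR(2) receive the vector; misc causes go through the separate
 * TXGBE_IVARMISC register in the first branch.
 */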
2744 * Sets up the hardware to properly generate MSI-X interrupts
2746 * board private structure
2749 txgbe_configure_msix(struct rte_eth_dev *dev)
2751 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2752 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2753 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2754 uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
2755 uint32_t vec = TXGBE_MISC_VEC_ID;
2758 /* won't configure the MSI-X register if no mapping is done
2759 * between intr vector and event fd,
2760 * but if MSI-X has been enabled already, we need to configure
2761 * auto clean, auto mask and throttling.
2763 gpie = rd32(hw, TXGBE_GPIE);
2764 if (!rte_intr_dp_is_en(intr_handle) &&
2765 !(gpie & TXGBE_GPIE_MSIX))
2768 if (rte_intr_allow_others(intr_handle)) {
2769 base = TXGBE_RX_VEC_START;
2773 /* setup GPIE for MSI-X mode */
2774 gpie = rd32(hw, TXGBE_GPIE);
2775 gpie |= TXGBE_GPIE_MSIX;
2776 wr32(hw, TXGBE_GPIE, gpie);
2778 /* Populate the IVAR table and set the ITR values to the
2779 * corresponding register.
2781 if (rte_intr_dp_is_en(intr_handle)) {
2782 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2784 /* by default, 1:1 mapping */
2785 txgbe_set_ivar_map(hw, 0, queue_id, vec);
2786 intr_handle->intr_vec[queue_id] = vec;
2787 if (vec < base + intr_handle->nb_efd - 1)
2791 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
2793 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
2794 TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2799 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
2800 u8 **mc_addr_ptr, u32 *vmdq)
2805 mc_addr = *mc_addr_ptr;
2806 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2811 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2812 struct rte_ether_addr *mc_addr_set,
2813 uint32_t nb_mc_addr)
2815 struct txgbe_hw *hw;
2818 hw = TXGBE_DEV_HW(dev);
2819 mc_addr_list = (u8 *)mc_addr_set;
2820 return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2821 txgbe_dev_addr_list_itr, TRUE);
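/*
 * Illustrative sketch: an application installs its multicast filter list
 * through the generic API, which lands in the function above ("port_id" is
 * a placeholder).
 *
 *	struct rte_ether_addr mc_list[2];
 *
 *	rte_ether_unformat_addr("01:00:5e:00:00:01", &mc_list[0]);
 *	rte_ether_unformat_addr("01:00:5e:00:00:02", &mc_list[1]);
 *	rte_eth_dev_set_mc_addr_list(port_id, mc_list, 2);
 */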
2824 static const struct eth_dev_ops txgbe_eth_dev_ops = {
2825 .dev_configure = txgbe_dev_configure,
2826 .dev_infos_get = txgbe_dev_info_get,
2827 .dev_start = txgbe_dev_start,
2828 .dev_stop = txgbe_dev_stop,
2829 .dev_set_link_up = txgbe_dev_set_link_up,
2830 .dev_set_link_down = txgbe_dev_set_link_down,
2831 .dev_close = txgbe_dev_close,
2832 .dev_reset = txgbe_dev_reset,
2833 .link_update = txgbe_dev_link_update,
2834 .stats_get = txgbe_dev_stats_get,
2835 .xstats_get = txgbe_dev_xstats_get,
2836 .xstats_get_by_id = txgbe_dev_xstats_get_by_id,
2837 .stats_reset = txgbe_dev_stats_reset,
2838 .xstats_reset = txgbe_dev_xstats_reset,
2839 .xstats_get_names = txgbe_dev_xstats_get_names,
2840 .xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id,
2841 .queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set,
2842 .dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
2843 .vlan_filter_set = txgbe_vlan_filter_set,
2844 .vlan_tpid_set = txgbe_vlan_tpid_set,
2845 .vlan_offload_set = txgbe_vlan_offload_set,
2846 .vlan_strip_queue_set = txgbe_vlan_strip_queue_set,
2847 .rx_queue_start = txgbe_dev_rx_queue_start,
2848 .rx_queue_stop = txgbe_dev_rx_queue_stop,
2849 .tx_queue_start = txgbe_dev_tx_queue_start,
2850 .tx_queue_stop = txgbe_dev_tx_queue_stop,
2851 .rx_queue_setup = txgbe_dev_rx_queue_setup,
2852 .rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable,
2853 .rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable,
2854 .rx_queue_release = txgbe_dev_rx_queue_release,
2855 .tx_queue_setup = txgbe_dev_tx_queue_setup,
2856 .tx_queue_release = txgbe_dev_tx_queue_release,
2857 .mac_addr_add = txgbe_add_rar,
2858 .mac_addr_remove = txgbe_remove_rar,
2859 .mac_addr_set = txgbe_set_default_mac_addr,
2860 .uc_hash_table_set = txgbe_uc_hash_table_set,
2861 .uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
2862 .set_mc_addr_list = txgbe_dev_set_mc_addr_list,
2863 .rxq_info_get = txgbe_rxq_info_get,
2864 .txq_info_get = txgbe_txq_info_get,
2867 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
2868 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
2869 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
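/*
 * Illustrative usage note: the kmod dependency above means the device must
 * be bound to one of the listed kernel drivers before EAL can probe it,
 * for example (BDF is a placeholder):
 *
 *	usertools/dpdk-devbind.py --bind=vfio-pci 0000:01:00.0
 */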
2871 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
2872 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
2874 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
2875 RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
2877 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
2878 RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
2881 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
2882 RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);