/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <rte_common.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
#include "txgbe_regs_group.h"
static const struct reg_info txgbe_regs_general[] = {
	{TXGBE_RST, 1, 1, "TXGBE_RST"},
	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
	{0, 0, 0, ""}
};
static const struct reg_info txgbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_dcb[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_diagnostic[] = {
	{0, 0, 0, ""}
};

/* PF registers */
static const struct reg_info *txgbe_regs_others[] = {
				txgbe_regs_general,
				txgbe_regs_nvm,
				txgbe_regs_interrupt,
				txgbe_regs_fctl_others,
				txgbe_regs_rxdma,
				txgbe_regs_rx,
				txgbe_regs_tx,
				txgbe_regs_wakeup,
				txgbe_regs_dcb,
				txgbe_regs_mac,
				txgbe_regs_diagnostic,
				NULL};
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);
#define TXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define TXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
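
/*
 * Illustrative example (editorial, not from the original source): with
 * 32-bit bitmap words, i.e. sizeof((h)->bitmap[0]) * NBBY == 32 assuming
 * NBBY == 8, queue 37 maps to idx = 37 / 32 = 1 and bit = 37 % 32 = 5,
 * so TXGBE_SET_HWSTRIP(h, 37) sets bit 5 of h->bitmap[1] and
 * TXGBE_GET_HWSTRIP(h, 37, r) reads that same bit back into r.
 */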
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops txgbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* Flow Director */
	HW_XSTAT(flow_director_added_filters),
	HW_XSTAT(flow_director_removed_filters),
	HW_XSTAT(flow_director_filter_add_errors),
	HW_XSTAT(flow_director_filter_remove_errors),
	HW_XSTAT(flow_director_matched_filters),
	HW_XSTAT(flow_director_missed_filters),

	/* FCoE */
	HW_XSTAT(rx_fcoe_crc_errors),
	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
	HW_XSTAT(rx_fcoe_dropped),
	HW_XSTAT(rx_fcoe_packets),
	HW_XSTAT(tx_fcoe_packets),
	HW_XSTAT(rx_fcoe_bytes),
	HW_XSTAT(tx_fcoe_bytes),
	HW_XSTAT(rx_fcoe_no_ddp),
	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC Statistics */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
			   sizeof(rte_txgbe_stats_strings[0]))
/* Per-priority statistics */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
	UP_XSTAT(rx_up_packets),
	UP_XSTAT(tx_up_packets),
	UP_XSTAT(rx_up_bytes),
	UP_XSTAT(tx_up_bytes),
	UP_XSTAT(rx_up_drop_packets),

	UP_XSTAT(tx_up_xon_packets),
	UP_XSTAT(rx_up_xon_packets),
	UP_XSTAT(tx_up_xoff_packets),
	UP_XSTAT(rx_up_xoff_packets),
	UP_XSTAT(rx_up_dropped),
	UP_XSTAT(rx_up_mbuf_alloc_errors),
	UP_XSTAT(tx_up_xon2off_packets),
};

#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
			   sizeof(rte_txgbe_up_strings[0]))
/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
			   sizeof(rte_txgbe_qp_strings[0]))
static inline bool
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return true;
	default:
		return false;
	}
}
static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}
static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}
static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}
static int
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_stat_mappings *stat_mappings =
		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if (hw->mac.type != txgbe_mac_raptor)
		return -ENOSYS;

	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
		return -EIO;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= TXGBE_NB_STAT_MAPPING) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsm[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsm[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);

	return 0;
}
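
/*
 * Register layout sketch (an editorial assumption based on the arithmetic
 * above and the "* 8" shift used when the map is consumed in
 * txgbe_dev_stats_get()): each 32-bit RQSM/TQSM register packs
 * NB_QMAP_FIELDS_PER_QSM_REG fields of QSM_REG_NB_BITS_PER_QMAP_FIELD bits
 * each.  With four 8-bit fields, queue 6 lands in register n = 6 / 4 = 1
 * at field offset 6 % 4 = 2, i.e. bits 16..23 of rqsm[1]/tqsm[1].
 */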
static void
txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
{
	int i;
	u8 bwgp;
	struct txgbe_dcb_tc_config *tc;

	UNREFERENCED_PARAMETER(hw);

	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
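	/*
	 * Example (assuming TXGBE_DCB_TC_MAX == 8, for illustration only):
	 * bwgp == 100 / 8 == 12, and the (i & 1) term below alternates the
	 * per-TC share between 12% and 13% so the eight TCs sum to 100%.
	 */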
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->pfc = txgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
	}
	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities */
	dcb_config->support.capabilities = 0xFF;
}
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = TXGBE_MNGSEM_SWPHY |
	       TXGBE_MNGSEM_SWMBX |
	       TXGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	u16 csum;
	int err, i, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized;
		 * Tx queue may not be initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
		16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

	/* Initialize DCB configuration */
	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
	txgbe_dcb_init(hw, dcb_config);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = txgbe_fc_full;
	hw->fc.current_mode = txgbe_fc_full;
	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
	}

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the bios. A delay of 200ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		rte_delay_ms(400);
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	}
	if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}
	/* Reset the hw statistics */
	txgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
		RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = txgbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));

	return 0;
}
static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}
static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct txgbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	return 0;
}
static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}
static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};
static int
txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
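	/*
	 * The 4096 possible VLAN IDs are spread over 128 32-bit VFTA
	 * registers (hence the 0x7F mask).  E.g. vlan_id 100 gives
	 * vid_idx = 100 >> 5 = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4
	 * (illustrative values, not from the original source).
	 */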
	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
static void
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		txgbe_vlan_hw_strip_enable(dev, queue);
	else
		txgbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			  !(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg |= TXGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			  (rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg &= ~TXGBE_RXCFG_VLAN;
	}
	rxcfg &= ~TXGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		txgbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
		txgbe_dev_rx_queue_start(dev, queue);
	}
}
static int
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, TXGBE_PORTCTL);

	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR, "Inner type is not supported"
				    " by single VLAN");
		}

		if (qinq)
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_LSB_MASK,
				TXGBE_TAGTPID_LSB(tpid));
		break;
	case ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, TXGBE_EXTAG,
				TXGBE_EXTAG_VLAN_MASK,
				TXGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq)
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_MSB_MASK,
				TXGBE_TAGTPID_MSB(tpid));
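		/*
		 * Editorial note (inferred from the LSB/MSB masks, not from
		 * documentation): TXGBE_TAGTPID(0) appears to hold two
		 * 16-bit TPIDs; the inner-VLAN path above programs the low
		 * half, this outer-VLAN path the high half.
		 */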
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}
static void
txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);
}
static void
txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
	vlnctrl |= TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}
static void
txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
	struct txgbe_rx_queue *rxq;

	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		TXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = PKT_RX_VLAN;
		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}
}
static void
txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl &= ~TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record the per-queue HW strip setting */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}
static void
txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl |= TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record the per-queue HW strip setting */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
static void
txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
	ctrl &= ~TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}
static void
txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl |= TXGBE_PORTCTL_VLANEXT;
	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
	    txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ctrl |= TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}
static void
txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct txgbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			txgbe_vlan_strip_queue_set(dev, i, 1);
		else
			txgbe_vlan_strip_queue_set(dev, i, 0);
	}
}
static void
txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct txgbe_rx_queue *rxq;

	if (mask & ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}
static int
txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK)
		txgbe_vlan_hw_strip_config(dev);

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			txgbe_vlan_hw_filter_enable(dev);
		else
			txgbe_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			txgbe_vlan_hw_extend_enable(dev);
		else
			txgbe_vlan_hw_extend_disable(dev);
	}

	return 0;
}
static int
txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	txgbe_vlan_offload_config(dev, mask);

	return 0;
}
static void
txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);

	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, TXGBE_VLANCTL, vlanctrl);
}
static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	return 0;
}
static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" values are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if no mq mode is configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					" not supported.");
			return -EINVAL;
		}
		/* check configuration for vmdq+dcb mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}
	}

	return 0;
}
static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions, we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}
static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	uint32_t gpie;

	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}
int
txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
{
	struct txgbe_hw *hw;
	struct txgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint32_t queue_end;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
	if (ret < 0)
		return ret;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (tx_rate > link.link_speed)
		return -EINVAL;

	if (q_msk == 0)
		return 0;

	hw = TXGBE_DEV_HW(dev);
	vfinfo = *(TXGBE_DEV_VFDATA(dev));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	if (queue_end >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (vfinfo) {
		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
			if (vf_idx == vf)
				continue;
			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
				idx++)
				total_rate += vfinfo[vf_idx].tx_rate[idx];
		}
	} else {
		return -EINVAL;
	}

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;
		}
	}

	if (total_rate > dev->data->dev_link.link_speed) {
		/*
		 * Reset the stored TX rates of the VF if the total
		 * would exceed the link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
		return -EINVAL;
	}

	/* Set ARBTXRATE of each queue/pool for vf X */
	for (; queue_idx <= queue_end; queue_idx++) {
		if (0x1 & q_msk)
			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
		q_msk = q_msk >> 1;
	}

	return 0;
}
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = 0;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();
	/* TXGBE devices don't support:
	 *    - half duplex (checked afterwards for valid speeds)
	 *    - fixed speed: TODO implement
	 */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
			"Invalid link_speeds for port %u, fixed speed not supported",
			dev->data->port_id);
		return -EINVAL;
	}

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	txgbe_stop_hw(hw);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = txgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	txgbe_pf_host_configure(dev);

	txgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	txgbe_configure_msix(dev);

	/* initialize transmission unit */
	txgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	err = txgbe_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		txgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	txgbe_configure_pb(dev);
	txgbe_configure_port(dev);
	txgbe_configure_dcb(dev);

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					txgbe_set_vf_rate_limit(dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	err = txgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			 ETH_LINK_SPEED_10G;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = (TXGBE_LINK_SPEED_100M_FULL |
			 TXGBE_LINK_SPEED_1GB_FULL |
			 TXGBE_LINK_SPEED_10GB_FULL);
	} else {
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= TXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= TXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= TXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= TXGBE_LINK_SPEED_100M_FULL;
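		/*
		 * Illustrative example (editorial): link_speeds ==
		 * ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G yields speed ==
		 * TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL,
		 * so only the requested speeds are advertised.
		 */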
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
		txgbe_dev_macsec_interrupt_setup(dev);
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     txgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		txgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	txgbe_enable_intr(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	txgbe_dev_link_update(dev, 0);

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	txgbe_dev_clear_queues(dev);
	return -EIO;
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
txgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	txgbe_disable_intr(hw);

	/* reset the NIC */
	txgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	txgbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
	}

	txgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   txgbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->rss_reta_updated = 0;
	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}
/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}
/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}
/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	txgbe_pf_reset_hw(hw);

	ret = txgbe_dev_stop(dev);

	txgbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				txgbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));

	/* cancel the delay handler before remove dev */
	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

	/* uninitialize PF if max_vfs not zero */
	txgbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}
/*
 * Reset PF device.
 */
static int
txgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to txgbe PF, it is rather complex.
	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_txgbe_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_txgbe_dev_init(dev, NULL);

	return ret;
}
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
{ \
	uint32_t current_counter = rd32(hw, reg); \
	if (current_counter < last_counter) \
		current_counter += 0x100000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFLL; \
}
#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{ \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
	uint64_t current_counter_msb = rd32(hw, reg_msb); \
	uint64_t current_counter = (current_counter_msb << 32) | \
		current_counter_lsb; \
	if (current_counter < last_counter) \
		current_counter += 0x1000000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFFLL; \
}
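
/*
 * Worked example for the 36-bit case (values illustrative): if the
 * hardware counter wrapped from near 0xFFFFFFFFF down to 0x100,
 * current_counter < last_counter, so 2^36 (0x1000000000) is added back
 * before the delta is taken, and the final mask keeps the result within
 * 36 bits.  The 32-bit variant does the same with 2^32.
 */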
void
txgbe_read_stats_registers(struct txgbe_hw *hw,
			   struct txgbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
	}

	/* PB Stats */
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->up[i].rx_up_xon_packets +=
				rd32(hw, TXGBE_PBRXUPXON(i));
		hw_stats->up[i].rx_up_xoff_packets +=
				rd32(hw, TXGBE_PBRXUPXOFF(i));
		hw_stats->up[i].tx_up_xon_packets +=
				rd32(hw, TXGBE_PBTXUPXON(i));
		hw_stats->up[i].tx_up_xoff_packets +=
				rd32(hw, TXGBE_PBTXUPXOFF(i));
		hw_stats->up[i].tx_up_xon2off_packets +=
				rd32(hw, TXGBE_PBTXUPOFF(i));
		hw_stats->up[i].rx_up_dropped +=
				rd32(hw, TXGBE_PBRXMISS(i));
	}
	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
	/* DMA Stats */
	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);

	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);

	/* FCoE Stats */
	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);

	/* Flow Director Stats */
	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
	hw_stats->flow_director_added_filters +=
		TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_removed_filters +=
		TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_filter_add_errors +=
		TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
	hw_stats->flow_director_filter_remove_errors +=
		TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, TXGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, TXGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, TXGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, TXGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, TXGBE_LSECRX_BADPKT(i));
	}
	hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, TXGBE_LSECRX_INVSAPKT);
	hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, TXGBE_LSECRX_BADSAPKT);

	hw_stats->rx_total_missed_packets = 0;
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->rx_total_missed_packets +=
			hw_stats->up[i].rx_up_dropped;
	}
}
static int
txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_stat_mappings *stat_mappings =
			TXGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	txgbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < TXGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}
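	/*
	 * Sketch of the clamping in the loop above: with
	 * RTE_ETHDEV_QUEUE_STAT_CNTRS == 16 (the common default, assumed
	 * here for illustration), a stat index of 18 read from the mapping
	 * register is folded to software counter 18 % 16 = 2.
	 */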
	/* Rx Errors */
	stats->imissed = hw_stats->rx_total_missed_packets;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rx_oversize_errors +
			 hw_stats->rx_drop_packets +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors +
			 hw_stats->rx_fcoe_crc_errors +
			 hw_stats->rx_fcoe_mbuf_allocation_errors;

	/* Tx Errors */
	stats->oerrors = 0;

	return 0;
}
static int
txgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	txgbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}
/* This function calculates the number of xstats based on the current config */
static int
txgbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);

	return TXGBE_NB_HW_STATS +
	       TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
	       TXGBE_NB_QP_STATS * nb_queues;
}
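
/*
 * For instance (illustrative numbers only): a port configured with 4 Rx
 * and 2 Tx queues reports TXGBE_NB_HW_STATS + TXGBE_NB_UP_STATS *
 * TXGBE_MAX_UP + TXGBE_NB_QP_STATS * max(4, 2) entries; the per-queue
 * block follows the larger of the two queue counts.
 */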
static inline int
txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	uint32_t nb, st;

	/* Extended stats from txgbe_hw_stats */
	if (id < TXGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			 rte_txgbe_stats_strings[id].name);
		return 0;
	}
	id -= TXGBE_NB_HW_STATS;

	/* Priority Stats */
	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
		nb = id / TXGBE_NB_UP_STATS;
		st = id % TXGBE_NB_UP_STATS;
		snprintf(name, size, "[p%u]%s", nb,
			 rte_txgbe_up_strings[st].name);
		return 0;
	}
	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;

	/* Queue Stats */
	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
		nb = id / TXGBE_NB_QP_STATS;
		st = id % TXGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			 rte_txgbe_qp_strings[st].name);
		return 0;
	}
	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;

	return -(int)(id + 1);
}
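
/*
 * The id space decoded above is layered: ids [0, TXGBE_NB_HW_STATS) are
 * device-wide stats, the next TXGBE_NB_UP_STATS * TXGBE_MAX_UP ids are
 * per-priority, and the rest are per-queue.  E.g. (assuming
 * TXGBE_NB_HW_STATS == 70 purely for illustration) id 73 decodes to
 * nb = 0, st = 3, i.e. "[p0]" plus the fourth per-priority name; ids
 * past all three ranges produce the negative return value.
 */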
static inline int
txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	uint32_t nb, st;

	/* Extended stats from txgbe_hw_stats */
	if (id < TXGBE_NB_HW_STATS) {
		*offset = rte_txgbe_stats_strings[id].offset;
		return 0;
	}
	id -= TXGBE_NB_HW_STATS;

	/* Priority Stats */
	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
		nb = id / TXGBE_NB_UP_STATS;
		st = id % TXGBE_NB_UP_STATS;
		*offset = rte_txgbe_up_strings[st].offset +
			nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
		return 0;
	}
	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;

	/* Queue Stats */
	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
		nb = id / TXGBE_NB_QP_STATS;
		st = id % TXGBE_NB_QP_STATS;
		*offset = rte_txgbe_qp_strings[st].offset +
			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}
	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;

	return -(int)(id + 1);
}
static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = txgbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from txgbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (txgbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}
static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return txgbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return limit;
}
static int
txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int limit)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	unsigned int i, count;

	txgbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = txgbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, txgbe_xstats_calc_num(dev));

	/* Extended stats from txgbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (txgbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}
static int
txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
		      unsigned int limit)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	unsigned int i, count;

	txgbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = txgbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, txgbe_xstats_calc_num(dev));

	/* Extended stats from txgbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (txgbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}
static int
txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int limit)
{
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return txgbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (txgbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return limit;
}
static int
txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}
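/*
 * Usage sketch (illustrative, not part of the driver; port_id is an assumed
 * application-side variable and error handling is omitted):
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, xs, n);
 *	rte_eth_xstats_reset(port_id);
 *
 * The reset call lands in txgbe_dev_xstats_reset() above, which clears the
 * read-on-clear hardware counters and the software totals together.
 */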
static int
txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	u16 eeprom_verh, eeprom_verl;
	u32 etrack_id;
	int ret;

	hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
	hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);

	etrack_id = (eeprom_verh << 16) | eeprom_verl;
	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}
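/*
 * Usage sketch (illustrative; follows the ethdev convention that a positive
 * return value is the buffer size needed, including the trailing '\0'):
 *
 *	char fw[32];
 *	int rc = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *	if (rc == 0)
 *		printf("fw: %s\n", fw);
 *	else if (rc > 0)
 *		printf("need %d bytes for the fw string\n", rc);
 */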
static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};
	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}
static const uint32_t *
txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
		return txgbe_get_supported_ptypes();

	return NULL;
}
void
txgbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		hw->mac.get_link_capabilities(hw, &speed, &autoneg);

	hw->mac.setup_link(hw, speed, true);

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}
/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_link link;
	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	hw->mac.get_link_status = true;

	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		if (hw->phy.media_type == txgbe_media_type_fiber) {
			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				txgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}
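/*
 * Usage sketch (illustrative): applications reach this op through the generic
 * ethdev API rather than calling the PMD directly:
 *
 *	struct rte_eth_link link;
 *	rte_eth_link_get_nowait(port_id, &link);   (wait_to_complete == 0)
 *	rte_eth_link_get(port_id, &link);          (wait_to_complete == 1)
 *
 * Note the fiber path above: if the link is down, a 10 us alarm is armed and
 * TXGBE_FLAG_NEED_LINK_CONFIG makes subsequent queries report link-down until
 * txgbe_dev_setup_link_alarm_handler() finishes reconfiguring the link.
 */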
static int
txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, TXGBE_PSRCTL);
	fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
	wr32(hw, TXGBE_PSRCTL, fctrl);

	return 0;
}
static int
txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, TXGBE_PSRCTL);
	fctrl &= (~TXGBE_PSRCTL_UCP);
	if (dev->data->all_multicast == 1)
		fctrl |= TXGBE_PSRCTL_MCP;
	else
		fctrl &= (~TXGBE_PSRCTL_MCP);
	wr32(hw, TXGBE_PSRCTL, fctrl);

	return 0;
}
static int
txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, TXGBE_PSRCTL);
	fctrl |= TXGBE_PSRCTL_MCP;
	wr32(hw, TXGBE_PSRCTL, fctrl);

	return 0;
}
static int
txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t fctrl;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	fctrl = rd32(hw, TXGBE_PSRCTL);
	fctrl &= (~TXGBE_PSRCTL_MCP);
	wr32(hw, TXGBE_PSRCTL, fctrl);

	return 0;
}
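/*
 * Summary of the PSRCTL bits driven by the four ops above: UCP accepts all
 * unicast frames (promiscuous) and MCP accepts all multicast frames
 * (allmulticast). Promiscuous enable sets both bits; promiscuous disable
 * keeps MCP set only while all_multicast is still on; and allmulticast
 * disable is a no-op while promiscuous mode is active, since promiscuous
 * mode must keep receiving all multicast traffic.
 */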
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during nic initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbe_dev_link_status_print(dev);
	if (on)
		intr->mask_misc |= TXGBE_ICRMISC_LSC;
	else
		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

	return 0;
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during nic initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask[0] |= TXGBE_ICR_MASK;
	intr->mask[1] |= TXGBE_ICR_MASK;

	return 0;
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during nic initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

	return 0;
}
/*
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX) {
		txgbe_pf_mbx_process(dev);
		intr->flags &= ~TXGBE_FLAG_MAILBOX;
	}

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/*get the link status before link update, for predicting later*/
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		if (!link.link_status)
			/* handle it 1 sec later, wait it being stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait it being stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}
/*
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable nic state. As the
 * NIC interrupt state is not stable for txgbe after link is just down,
 * it needs to wait 4 seconds to get the stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t eicr;

	txgbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	if (eicr & TXGBE_ICRMISC_VFMBX)
		txgbe_pf_mbx_process(dev);

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		txgbe_dev_link_update(dev, 0);
		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
		txgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & TXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~TXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}
/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbe_dev_interrupt_get_status(dev);
	txgbe_dev_interrupt_action(dev, dev->intr_handle);
}
static int
txgbe_dev_led_on(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw;

	hw = TXGBE_DEV_HW(dev);
	return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
}

static int
txgbe_dev_led_off(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw;

	hw = TXGBE_DEV_HW(dev);
	return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
}
static int
txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct txgbe_hw *hw;
	uint32_t mflcn_reg;
	uint32_t fccfg_reg;
	int rx_pause;
	int tx_pause;

	hw = TXGBE_DEV_HW(dev);

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water[0];
	fc_conf->low_water = hw->fc.low_water[0];
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;

	/*
	 * Return rx_pause status according to actual setting of
	 * RXFCCFG register.
	 */
	mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
	if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
		rx_pause = 1;
	else
		rx_pause = 0;

	/*
	 * Return tx_pause status according to actual setting of
	 * TXFCCFG register.
	 */
	fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
	if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
		tx_pause = 1;
	else
		tx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}
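/*
 * Usage sketch (illustrative): read-modify-write the flow control setup
 * through the generic ethdev API:
 *
 *	struct rte_eth_fc_conf fc;
 *	rte_eth_dev_flow_ctrl_get(port_id, &fc);
 *	fc.mode = RTE_FC_FULL;
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *
 * The watermarks are validated against a kilobyte-based bound derived from
 * the Rx packet buffer size in txgbe_flow_ctrl_set() below.
 */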
static int
txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct txgbe_hw *hw;
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
		txgbe_fc_none,
		txgbe_fc_rx_pause,
		txgbe_fc_tx_pause,
		txgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	hw = TXGBE_DEV_HW(dev);
	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/*
	 * At least reserve one Ethernet frame for watermark
	 * high_water/low_water in kilo bytes for txgbe
	 */
	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
	if (fc_conf->high_water > max_high_water ||
	    fc_conf->high_water < fc_conf->low_water) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water[0] = fc_conf->high_water;
	hw->fc.low_water[0] = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;
	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;

	err = txgbe_fc_enable(hw);

	/* Not negotiated is not an error case */
	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
		wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
		      (fc_conf->mac_ctrl_frame_fwd
		       ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
		txgbe_flush(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
	return -EIO;
}
static int
txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf)
{
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint8_t tc_num;
	uint8_t map[TXGBE_DCB_UP_MAX] = { 0 };
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);

	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
		txgbe_fc_none,
		txgbe_fc_rx_pause,
		txgbe_fc_tx_pause,
		txgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
	tc_num = map[pfc_conf->priority];
	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
	/*
	 * At least reserve one Ethernet frame for watermark
	 * high_water/low_water in kilo bytes for txgbe
	 */
	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
	if (pfc_conf->fc.high_water > max_high_water ||
	    pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
	hw->fc.pause_time = pfc_conf->fc.pause_time;
	hw->fc.send_xon = pfc_conf->fc.send_xon;
	hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;

	err = txgbe_dcb_pfc_enable(hw, tc_num);

	/* Not negotiated is not an error case */
	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
		return 0;

	PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
	return -EIO;
}
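/*
 * Usage sketch (illustrative; the watermark values are placeholders): the
 * requested user priority is first mapped to a traffic class through the
 * CEE map before the per-TC watermarks are programmed:
 *
 *	struct rte_eth_pfc_conf pfc = {
 *		.fc = { .mode = RTE_FC_FULL, .pause_time = 0x680,
 *			.high_water = 0x80, .low_water = 0x40 },
 *		.priority = 0,
 *	};
 *	rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc);
 */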
static int
txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	uint8_t mask;
	uint32_t reta;
	uint16_t i, j;
	uint16_t idx, shift;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	if (!txgbe_rss_update_sp(hw->mac.type)) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1)) {
				reta &= ~(MS32(8 * j, 0xFF));
				reta |= LS32(reta_conf[idx].reta[shift + j],
						8 * j, 0xFF);
			}
		}
		wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
	}
	adapter->rss_reta_updated = 1;

	return 0;
}
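/*
 * Usage sketch (illustrative): redirect all RSS traffic to queue 0 on the
 * 128-entry table this device exposes:
 *
 *	struct rte_eth_rss_reta_entry64 conf[ETH_RSS_RETA_SIZE_128 /
 *					     RTE_RETA_GROUP_SIZE];
 *	memset(conf, 0, sizeof(conf));
 *	for (int k = 0; k < ETH_RSS_RETA_SIZE_128 / RTE_RETA_GROUP_SIZE; k++)
 *		conf[k].mask = UINT64_MAX;	select all 64 entries per group
 *	rte_eth_dev_rss_reta_update(port_id, conf, ETH_RSS_RETA_SIZE_128);
 */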
static int
txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint8_t mask;
	uint32_t reta;
	uint16_t i, j;
	uint16_t idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1))
				reta_conf[idx].reta[shift + j] =
					(uint16_t)RS32(reta, 8 * j, 0xFF);
		}
	}

	return 0;
}
static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t enable_addr = 1;

	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	txgbe_remove_rar(dev, 0);
	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}
static int
txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	struct rte_eth_dev_data *dev_data = dev->data;
	int ret;

	ret = txgbe_dev_info_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	if (hw->mode)
		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
			TXGBE_FRAME_SIZE_MAX);
	else
		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
			TXGBE_FRMSZ_MAX(frame_size));

	return 0;
}
static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
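/*
 * Worked example (filter type 0, bits [47:36]): for a unicast address with
 * addr_bytes[4] = 0x12 and addr_bytes[5] = 0x34, the vector is
 * (0x12 >> 4) | (0x34 << 4) = 0x341. The caller below then uses bits [11:5]
 * as the UTA register index and bits [4:0] as the bit position inside it.
 */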
static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	vector = txgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, TXGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (on)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
static uint32_t
txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
{
	uint32_t new_val = orig_val;

	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
		new_val |= TXGBE_POOLETHCTL_UTA;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
		new_val |= TXGBE_POOLETHCTL_MCHA;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
		new_val |= TXGBE_POOLETHCTL_UCHA;
	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
		new_val |= TXGBE_POOLETHCTL_BCA;
	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
		new_val |= TXGBE_POOLETHCTL_MCP;

	return new_val;
}
static int
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= (1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= (1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}
	rte_intr_enable(intr_handle);

	return 0;
}
static int
txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= ~(1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= ~(1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}

	return 0;
}
/*
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
static void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}
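/*
 * Worked example: each 32-bit IVAR register holds four 8-bit vector entries,
 * covering two queues per register with one Rx and one Tx byte each. Mapping
 * the Rx cause of queue 5 (direction 0) therefore lands in
 * IVAR(5 >> 1) = IVAR(2) at bit offset 16 * (5 & 1) + 8 * 0 = 16.
 */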
/*
 * Sets up the hardware to properly generate MSI-X interrupts
 * @hw
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 * but if misx has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = TXGBE_RX_VEC_START;
	}

	/* setup GPIE for MSI-x mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
		TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
		| TXGBE_ITR_WRDSA);
}
static int
txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
			   uint16_t queue_idx, uint16_t tx_rate)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t bcnrc_val;

	if (queue_idx >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (tx_rate != 0) {
		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
	} else {
		bcnrc_val = 0;
	}

	/*
	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(hw, TXGBE_ARBTXMMW, 0x14);

	/* Set ARBTXRATE of queue X */
	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);

	return 0;
}
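/*
 * Usage sketch (illustrative): cap Tx queue 0 at 1000 Mbps through the
 * generic ethdev call, which dispatches here via eth_dev_ops:
 *
 *	rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */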
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct txgbe_hw *hw;
	u8 *mc_addr_list;

	hw = TXGBE_DEV_HW(dev);
	mc_addr_list = (u8 *)mc_addr_set;
	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 txgbe_dev_addr_list_itr, TRUE);
}
static uint64_t
txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint64_t systime_cycles;

	systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
	systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;

	return systime_cycles;
}

static uint64_t
txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint64_t rx_tstamp_cycles;

	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
	rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
	rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;

	return rx_tstamp_cycles;
}

static uint64_t
txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint64_t tx_tstamp_cycles;

	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
	tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
	tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;

	return tx_tstamp_cycles;
}
static void
txgbe_start_timecounters(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct rte_eth_link link;
	uint32_t incval = 0;
	uint32_t shift = 0;

	/* Get current link speed. */
	txgbe_dev_link_update(dev, 1);
	rte_eth_linkstatus_get(dev, &link);

	switch (link.link_speed) {
	case ETH_SPEED_NUM_100M:
		incval = TXGBE_INCVAL_100;
		shift = TXGBE_INCVAL_SHIFT_100;
		break;
	case ETH_SPEED_NUM_1G:
		incval = TXGBE_INCVAL_1GB;
		shift = TXGBE_INCVAL_SHIFT_1GB;
		break;
	case ETH_SPEED_NUM_10G:
	default:
		incval = TXGBE_INCVAL_10GB;
		shift = TXGBE_INCVAL_SHIFT_10GB;
		break;
	}

	wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));

	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
	adapter->systime_tc.cc_shift = shift;
	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
	adapter->rx_tstamp_tc.cc_shift = shift;
	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
	adapter->tx_tstamp_tc.cc_shift = shift;
	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
}
static int
txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);

	adapter->systime_tc.nsec += delta;
	adapter->rx_tstamp_tc.nsec += delta;
	adapter->tx_tstamp_tc.nsec += delta;

	return 0;
}

static int
txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
	uint64_t ns;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);

	ns = rte_timespec_to_ns(ts);
	/* Set the timecounters to a new value. */
	adapter->systime_tc.nsec = ns;
	adapter->rx_tstamp_tc.nsec = ns;
	adapter->tx_tstamp_tc.nsec = ns;

	return 0;
}

static int
txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
	uint64_t ns, systime_cycles;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);

	systime_cycles = txgbe_read_systime_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
	*ts = rte_ns_to_timespec(ns);

	return 0;
}
static int
txgbe_timesync_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t tsync_ctl;

	/* Stop the timesync system time. */
	wr32(hw, TXGBE_TSTIMEINC, 0x0);
	/* Reset the timesync system time value. */
	wr32(hw, TXGBE_TSTIMEL, 0x0);
	wr32(hw, TXGBE_TSTIMEH, 0x0);

	txgbe_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
		RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
	tsync_ctl |= TXGBE_TSRXCTL_ENA;
	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
	tsync_ctl |= TXGBE_TSTXCTL_ENA;
	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);

	txgbe_flush(hw);

	return 0;
}
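/*
 * Usage sketch (illustrative) of the IEEE 1588 path enabled above:
 *
 *	rte_eth_timesync_enable(port_id);
 *	struct timespec ts;
 *	rte_eth_timesync_read_time(port_id, &ts);
 *	rte_eth_timesync_adjust_time(port_id, delta_ns);
 *
 * Per-packet Rx/Tx timestamps are then fetched with
 * rte_eth_timesync_read_rx_timestamp() / rte_eth_timesync_read_tx_timestamp(),
 * implemented further below.
 */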
static int
txgbe_timesync_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
	tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
	tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);

	/* Stop incrementating the System Time registers. */
	wr32(hw, TXGBE_TSTIMEINC, 0);

	return 0;
}
static int
txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp,
				 uint32_t flags __rte_unused)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	uint32_t tsync_rxctl;
	uint64_t rx_tstamp_cycles;
	uint64_t ns;

	tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
	if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
		return -EINVAL;

	rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}

static int
txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	uint32_t tsync_txctl;
	uint64_t tx_tstamp_cycles;
	uint64_t ns;

	tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
	if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
		return -EINVAL;

	tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}
static int
txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = txgbe_regs_others;

	while ((reg_group = reg_set[g_ind++]))
		count += txgbe_regs_group_count(reg_group);

	return count;
}

static int
txgbe_get_regs(struct rte_eth_dev *dev,
	       struct rte_dev_reg_info *regs)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = txgbe_regs_others;

	if (data == NULL) {
		regs->length = txgbe_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only full register dump */
	if (regs->length == 0 ||
	    regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
		regs->version = hw->mac.type << 24 |
				hw->revision_id << 16 |
				hw->device_id;
		while ((reg_group = reg_set[g_ind++]))
			count += txgbe_read_regs_group(dev, &data[count],
						      reg_group);
		return 0;
	}

	return -ENOTSUP;
}
static int
txgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	/* Return unit is byte count */
	return hw->rom.word_size * 2;
}

static int
txgbe_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *in_eeprom)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rom_info *eeprom = &hw->rom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if (first > hw->rom.word_size ||
	    ((first + length) > hw->rom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->readw_buffer(hw, first, length, data);
}

static int
txgbe_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *in_eeprom)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rom_info *eeprom = &hw->rom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if (first > hw->rom.word_size ||
	    ((first + length) > hw->rom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->writew_buffer(hw, first, length, data);
}
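/*
 * Usage sketch (illustrative): offsets and lengths are in bytes at the API
 * level and are halved into 16-bit EEPROM words above:
 *
 *	struct rte_dev_eeprom_info info = { .offset = 0 };
 *	info.length = rte_eth_dev_get_eeprom_length(port_id);
 *	info.data = malloc(info.length);
 *	rte_eth_dev_get_eeprom(port_id, &info);
 */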
static int
txgbe_get_module_info(struct rte_eth_dev *dev,
		      struct rte_eth_dev_module_info *modinfo)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t status;
	uint8_t sff8472_rev, addr_mode;
	bool page_swap = false;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.read_i2c_eeprom(hw,
					 TXGBE_SFF_SFF_8472_COMP,
					 &sff8472_rev);
	if (status != 0)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.read_i2c_eeprom(hw,
					 TXGBE_SFF_SFF_8472_SWAP,
					 &addr_mode);
	if (status != 0)
		return -EIO;

	if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
		PMD_DRV_LOG(ERR,
			    "Address change required to access page 0xA2, "
			    "but not supported. Please report the module "
			    "type to the driver maintainers.");
		page_swap = true;
	}

	if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = RTE_ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
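/*
 * Usage sketch (illustrative): query the SFP type first, then dump its
 * EEPROM contents via the op implemented below:
 *
 *	struct rte_eth_dev_module_info mi;
 *	rte_eth_dev_get_module_info(port_id, &mi);
 *	struct rte_dev_eeprom_info ei = { .offset = 0, .length = mi.eeprom_len };
 *	ei.data = malloc(ei.length);
 *	rte_eth_dev_get_module_eeprom(port_id, &ei);
 */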
static int
txgbe_get_module_eeprom(struct rte_eth_dev *dev,
			struct rte_dev_eeprom_info *info)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
	uint8_t databyte = 0xFF;
	uint8_t *data = info->data;
	uint32_t i = 0;

	if (info->length == 0)
		return -EINVAL;

	for (i = info->offset; i < info->offset + info->length; i++) {
		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.read_i2c_sff8472(hw, i, &databyte);

		if (status != 0)
			return -EIO;

		data[i - info->offset] = databyte;
	}

	return 0;
}
bool
txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
{
	switch (mac_type) {
	case txgbe_mac_raptor:
		return 1;
	default:
		return 0;
	}
}
static int
txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
		       struct rte_eth_dcb_info *dcb_info)
{
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
	struct txgbe_dcb_tc_config *tc;
	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
	uint8_t nb_tcs;
	uint8_t i, j;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;

	tc_queue = &dcb_info->tc_queue;
	nb_tcs = dcb_info->nb_tcs;

	if (dcb_config->vt_mode) { /* vt is enabled */
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
			for (j = 0; j < nb_tcs; j++) {
				tc_queue->tc_rxq[0][j].base = j;
				tc_queue->tc_rxq[0][j].nb_queue = 1;
				tc_queue->tc_txq[0][j].base = j;
				tc_queue->tc_txq[0][j].nb_queue = 1;
			}
		} else {
			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
				for (j = 0; j < nb_tcs; j++) {
					tc_queue->tc_rxq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_rxq[i][j].nb_queue = 1;
					tc_queue->tc_txq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_txq[i][j].nb_queue = 1;
				}
			}
		}
	} else { /* vt is disabled */
		struct rte_eth_dcb_rx_conf *rx_conf =
				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
		if (dcb_info->nb_tcs == ETH_4_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 64;
			dcb_info->tc_queue.tc_txq[0][2].base = 96;
			dcb_info->tc_queue.tc_txq[0][3].base = 112;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 32;
			dcb_info->tc_queue.tc_txq[0][2].base = 64;
			dcb_info->tc_queue.tc_txq[0][3].base = 80;
			dcb_info->tc_queue.tc_txq[0][4].base = 96;
			dcb_info->tc_queue.tc_txq[0][5].base = 104;
			dcb_info->tc_queue.tc_txq[0][6].base = 112;
			dcb_info->tc_queue.tc_txq[0][7].base = 120;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
		}
	}
	for (i = 0; i < dcb_info->nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
	}

	return 0;
}
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure              = txgbe_dev_configure,
	.dev_infos_get              = txgbe_dev_info_get,
	.dev_start                  = txgbe_dev_start,
	.dev_stop                   = txgbe_dev_stop,
	.dev_set_link_up            = txgbe_dev_set_link_up,
	.dev_set_link_down          = txgbe_dev_set_link_down,
	.dev_close                  = txgbe_dev_close,
	.dev_reset                  = txgbe_dev_reset,
	.promiscuous_enable         = txgbe_dev_promiscuous_enable,
	.promiscuous_disable        = txgbe_dev_promiscuous_disable,
	.allmulticast_enable        = txgbe_dev_allmulticast_enable,
	.allmulticast_disable       = txgbe_dev_allmulticast_disable,
	.link_update                = txgbe_dev_link_update,
	.stats_get                  = txgbe_dev_stats_get,
	.xstats_get                 = txgbe_dev_xstats_get,
	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
	.stats_reset                = txgbe_dev_stats_reset,
	.xstats_reset               = txgbe_dev_xstats_reset,
	.xstats_get_names           = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
	.fw_version_get             = txgbe_fw_version_get,
	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
	.mtu_set                    = txgbe_dev_mtu_set,
	.vlan_filter_set            = txgbe_vlan_filter_set,
	.vlan_tpid_set              = txgbe_vlan_tpid_set,
	.vlan_offload_set           = txgbe_vlan_offload_set,
	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
	.rx_queue_start             = txgbe_dev_rx_queue_start,
	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
	.tx_queue_start             = txgbe_dev_tx_queue_start,
	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release           = txgbe_dev_rx_queue_release,
	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
	.tx_queue_release           = txgbe_dev_tx_queue_release,
	.dev_led_on                 = txgbe_dev_led_on,
	.dev_led_off                = txgbe_dev_led_off,
	.flow_ctrl_get              = txgbe_flow_ctrl_get,
	.flow_ctrl_set              = txgbe_flow_ctrl_set,
	.priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
	.mac_addr_add               = txgbe_add_rar,
	.mac_addr_remove            = txgbe_remove_rar,
	.mac_addr_set               = txgbe_set_default_mac_addr,
	.uc_hash_table_set          = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
	.reta_update                = txgbe_dev_rss_reta_update,
	.reta_query                 = txgbe_dev_rss_reta_query,
	.rss_hash_update            = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
	.rxq_info_get               = txgbe_rxq_info_get,
	.txq_info_get               = txgbe_txq_info_get,
	.timesync_enable            = txgbe_timesync_enable,
	.timesync_disable           = txgbe_timesync_disable,
	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
	.get_reg                    = txgbe_get_regs,
	.get_eeprom_length          = txgbe_get_eeprom_length,
	.get_eeprom                 = txgbe_get_eeprom,
	.set_eeprom                 = txgbe_set_eeprom,
	.get_module_info            = txgbe_get_module_info,
	.get_module_eeprom          = txgbe_get_module_eeprom,
	.get_dcb_info               = txgbe_dev_get_dcb_info,
	.timesync_adjust_time       = txgbe_timesync_adjust_time,
	.timesync_read_time         = txgbe_timesync_read_time,
	.timesync_write_time        = txgbe_timesync_write_time,
	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
};
RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif