/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <rte_common.h>
#include <ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
#include "txgbe_regs_group.h"
static const struct reg_info txgbe_regs_general[] = {
	{TXGBE_RST, 1, 1, "TXGBE_RST"},
	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
static const struct reg_info txgbe_regs_nvm[] = {

static const struct reg_info txgbe_regs_interrupt[] = {

static const struct reg_info txgbe_regs_fctl_others[] = {

static const struct reg_info txgbe_regs_rxdma[] = {

static const struct reg_info txgbe_regs_rx[] = {

static const struct reg_info txgbe_regs_tx[] = {

static const struct reg_info txgbe_regs_wakeup[] = {

static const struct reg_info txgbe_regs_dcb[] = {

static const struct reg_info txgbe_regs_mac[] = {

static const struct reg_info txgbe_regs_diagnostic[] = {

static const struct reg_info *txgbe_regs_others[] = {
	txgbe_regs_fctl_others,
	txgbe_regs_diagnostic,
static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

static int txgbe_filter_restore(struct rte_eth_dev *dev);
static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
#define TXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\

#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\

#define TXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
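/*
 * Worked example for the bitmap math above (illustrative, not from the
 * original source): with 32-bit bitmap words and NBBY = 8 bits per byte,
 * each word tracks 32 queues. For queue 37:
 *   idx = 37 / 32 = 1, bit = 37 % 32 = 5
 * so TXGBE_SET_HWSTRIP(h, 37) sets bit 5 of bitmap[1].
 */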
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,

static const struct eth_dev_ops txgbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
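/*
 * Illustrative expansion (assumption: struct txgbe_hw_stats has the members
 * named in the table below):
 *   HW_XSTAT(rx_packets)
 *     => {"rx_packets", offsetof(struct txgbe_hw_stats, rx_packets)}
 *   HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets")
 *     => {"tx_flow_control_xon_packets",
 *         offsetof(struct txgbe_hw_stats, tx_xon_packets)}
 * The stored offset is later added to the hw_stats base pointer to fetch
 * the counter value when reporting xstats.
 */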
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),

	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	HW_XSTAT(flow_director_added_filters),
	HW_XSTAT(flow_director_removed_filters),
	HW_XSTAT(flow_director_filter_add_errors),
	HW_XSTAT(flow_director_filter_remove_errors),
	HW_XSTAT(flow_director_matched_filters),
	HW_XSTAT(flow_director_missed_filters),

	HW_XSTAT(rx_fcoe_crc_errors),
	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
	HW_XSTAT(rx_fcoe_dropped),
	HW_XSTAT(rx_fcoe_packets),
	HW_XSTAT(tx_fcoe_packets),
	HW_XSTAT(rx_fcoe_bytes),
	HW_XSTAT(tx_fcoe_bytes),
	HW_XSTAT(rx_fcoe_no_ddp),
	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),

	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),

#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
			   sizeof(rte_txgbe_stats_strings[0]))
/* Per-priority statistics */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
	UP_XSTAT(rx_up_packets),
	UP_XSTAT(tx_up_packets),
	UP_XSTAT(rx_up_bytes),
	UP_XSTAT(tx_up_bytes),
	UP_XSTAT(rx_up_drop_packets),

	UP_XSTAT(tx_up_xon_packets),
	UP_XSTAT(rx_up_xon_packets),
	UP_XSTAT(tx_up_xoff_packets),
	UP_XSTAT(rx_up_xoff_packets),
	UP_XSTAT(rx_up_dropped),
	UP_XSTAT(rx_up_mbuf_alloc_errors),
	UP_XSTAT(tx_up_xon2off_packets),

#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
			   sizeof(rte_txgbe_up_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),

#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
			   sizeof(rte_txgbe_qp_strings[0]))
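/*
 * Editorial note: the UP_XSTAT/QP_XSTAT tables record offsets relative to
 * up[0]/qp[0] only; the per-UP and per-queue xstats are presumably expanded
 * at runtime by adding i * sizeof(up[0]) (resp. qp[0]) for each user
 * priority or queue index, so TXGBE_NB_UP_STATS and TXGBE_NB_QP_STATS count
 * name templates, not final xstats entries.
 */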
txgbe_is_sfp(struct txgbe_hw *hw)
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:

static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);

	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
txgbe_enable_intr(struct rte_eth_dev *dev)
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);

txgbe_disable_intr(struct txgbe_hw *hw)
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_stat_mappings *stat_mappings =
		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;

	if (hw->mac.type != txgbe_mac_raptor)

	/* reject a stat_idx with bits outside the QMAP field (the original
	 * used logical '!' here, which made the check always false)
	 */
	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= TXGBE_NB_STAT_MAPPING) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
		stat_mappings->tqsm[n] &= ~clearing_mask;
		stat_mappings->rqsm[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
		stat_mappings->tqsm[n] |= qsmr_mask;
		stat_mappings->rqsm[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
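/*
 * Illustrative example (values assume 8-bit QMAP fields and 4 fields per
 * 32-bit QSM register, as the macro names suggest): mapping queue_id 5 to
 * stat_idx 3 gives n = 5 / 4 = 1 and offset = 5 % 4 = 1, so bits 15:8 of
 * RQSMR[1] (Rx) or TQSM[1] (Tx) are cleared and then set to 3.
 */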
txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
	struct txgbe_dcb_tc_config *tc;

	UNREFERENCED_PARAMETER(hw);

	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->pfc = txgbe_dcb_pfc_disabled;

	/* Initialize the default user priority (UP) to TC mapping: UPx -> TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities */
	dcb_config->support.capabilities = 0xFF;
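/*
 * Editorial note: assuming TXGBE_DCB_TC_MAX is 8 traffic classes,
 * bwgp = 100 / 8 = 12, and bwgp + (i & 1) assigns 12% to even TCs and 13%
 * to odd TCs, so the eight per-TC shares sum to exactly 100%.
 */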
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * the lock cannot be taken, it is due to an improper lock of the
	 * semaphore.
	 */
	mask = TXGBE_MNGSEM_SWPHY |
	       TXGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary process has already done this work. Only check that we
	 * don't need a different RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* The TX function in a secondary process is set by the last
		 * queue initialized in the primary process. A Tx queue may
		 * not have been initialized by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");

		txgbe_set_rx_function(eth_dev);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
				      16, TXGBE_ALIGN, SOCKET_ID_ANY);

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);
#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for the primary process */
	if (txgbe_ipsec_ctx_create(eth_dev))

	/* Initialize DCB configuration */
	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
	txgbe_dcb_init(hw, dcb_config);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = txgbe_fc_full;
	hw->fc.current_mode = txgbe_fc_full;
	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;

	err = hw->rom.init_params(hw);
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper PHYs will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and it only occurs when virtualisation
	 * features are disabled in the BIOS. A delay of 200ms was found to be
	 * enough by trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		err = hw->mac.init_hw(hw);

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
	/* Reset the hw statistics */
	txgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
			     "Failed to allocate %u bytes needed to store "
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		     "Failed to allocate %d bytes needed to store MAC addresses",
		     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs is not zero */
	ret = txgbe_pf_host_init(eth_dev);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct txgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize flow director filter list & hash */
	txgbe_fdir_filter_init(eth_dev);

	/* initialize l2 tunnel filter list & hash */
	txgbe_l2_tn_filter_init(eth_dev);

	/* initialize flow filter lists */
	txgbe_filterlist_init();

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));

	/* initialize Traffic Manager configuration */
	txgbe_tm_conf_init(eth_dev);

eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	txgbe_dev_close(eth_dev);
static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	struct txgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);

static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
	struct txgbe_fdir_filter *fdir_filter;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_handle)
		rte_hash_free(fdir_info->hash_handle);

	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
		rte_free(fdir_filter);

static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
	struct txgbe_l2_tn_filter *l2_tn_filter;

	if (l2_tn_info->hash_map)
		rte_free(l2_tn_info->hash_map);
	if (l2_tn_info->hash_handle)
		rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
		rte_free(l2_tn_filter);
static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = TXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct txgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),

	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", TDEV_NAME(eth_dev));
	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
	fdir_info->hash_map = rte_zmalloc("txgbe",
					  sizeof(struct txgbe_fdir_filter *) *
					  TXGBE_MAX_FDIR_FILTER_NUM,
	if (!fdir_info->hash_map) {
		     "Failed to allocate memory for fdir hash map!");
	fdir_info->mask_added = FALSE;

static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters l2_tn_hash_params = {
		.name = l2_tn_hash_name,
		.entries = TXGBE_MAX_L2_TN_FILTER_NUM,
		.key_len = sizeof(struct txgbe_l2_tn_key),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),

	TAILQ_INIT(&l2_tn_info->l2_tn_list);
	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
		 "l2_tn_%s", TDEV_NAME(eth_dev));
	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
	if (!l2_tn_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
	l2_tn_info->hash_map = rte_zmalloc("txgbe",
					   sizeof(struct txgbe_l2_tn_filter *) *
					   TXGBE_MAX_L2_TN_FILTER_NUM,
	if (!l2_tn_info->hash_map) {
		     "Failed to allocate memory for L2 TN hash map!");
	l2_tn_info->e_tag_en = FALSE;
	l2_tn_info->e_tag_fwd_en = FALSE;
	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
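/*
 * Editorial note on the pattern above: rte_hash_create() presumably maps a
 * filter key to a small integer index, and the separately allocated
 * hash_map array holds the filter object pointer at that index. A lookup is
 * then rte_hash_lookup() followed by an array dereference, while the TAILQ
 * keeps an iterable list of all installed filters for restore/flush.
 */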
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev)
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
		memset(&eth_da, 0, sizeof(eth_da));

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
				    sizeof(struct txgbe_adapter),
				    eth_dev_pci_specific_init, pci_dev,
				    eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);

static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,

txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;
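/*
 * Worked example (illustrative): for vlan_id 100,
 *   vid_idx = (100 >> 5) & 0x7F = 3    -> VLANTBL register 3
 *   vid_bit = 1 << (100 & 0x1F) = 1 << 4
 * so enabling VLAN 100 sets bit 4 of VLANTBL[3]; each of the 128 table
 * words filters 32 consecutive VLAN IDs, covering all 4096 VLANs.
 */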
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	uint32_t rxcfg, rxbal, rxbah;

		txgbe_vlan_hw_strip_enable(dev, queue);
		txgbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			  !(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg |= TXGBE_RXCFG_VLAN;
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			  (rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg &= ~TXGBE_RXCFG_VLAN;
	rxcfg &= ~TXGBE_RXCFG_ENA;

	/* set vlan strip for ring */
	txgbe_dev_rx_queue_stop(dev, queue);
	wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
	wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
	wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
	txgbe_dev_rx_queue_start(dev, queue);
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, TXGBE_PORTCTL);

	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
	case ETH_VLAN_TYPE_INNER:
			wr32m(hw, TXGBE_VLANCTL,
			      TXGBE_VLANCTL_TPID_MASK,
			      TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
			      TXGBE_DMATXCTRL_TPID_MASK,
			      TXGBE_DMATXCTRL_TPID(tpid));
			PMD_DRV_LOG(ERR, "Inner type is not supported"
			wr32m(hw, TXGBE_TAGTPID(0),
			      TXGBE_TAGTPID_LSB_MASK,
			      TXGBE_TAGTPID_LSB(tpid));
	case ETH_VLAN_TYPE_OUTER:
			/* Only the high 16 bits are valid */
			wr32m(hw, TXGBE_EXTAG,
			      TXGBE_EXTAG_VLAN_MASK,
			      TXGBE_EXTAG_VLAN(tpid));
			wr32m(hw, TXGBE_VLANCTL,
			      TXGBE_VLANCTL_TPID_MASK,
			      TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
			      TXGBE_DMATXCTRL_TPID_MASK,
			      TXGBE_DMATXCTRL_TPID(tpid));
		wr32m(hw, TXGBE_TAGTPID(0),
		      TXGBE_TAGTPID_MSB_MASK,
		      TXGBE_TAGTPID_MSB(tpid));
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);

txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
	vlnctrl |= TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);

txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
	struct txgbe_rx_queue *rxq;

	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)

		TXGBE_SET_HWSTRIP(hwstrip, queue);
		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)

	rxq = dev->data->rx_queues[queue];

		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
		rxq->vlan_flags = PKT_RX_VLAN;
		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;

txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl &= ~TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);

txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl |= TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
	ctrl &= ~TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);

txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl |= TXGBE_PORTCTL_VLANEXT;
	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
	    txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ctrl |= TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);

txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
	struct txgbe_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			txgbe_vlan_strip_queue_set(dev, i, 1);
			txgbe_vlan_strip_queue_set(dev, i, 0);

txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
	struct rte_eth_rxmode *rxmode;
	struct txgbe_rx_queue *rxq;

	if (mask & ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;

txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK)
		txgbe_vlan_hw_strip_config(dev);

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			txgbe_vlan_hw_filter_enable(dev);
			txgbe_vlan_hw_filter_disable(dev);

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			txgbe_vlan_hw_extend_enable(dev);
			txgbe_vlan_hw_extend_disable(dev);

txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	txgbe_vlan_offload_config(dev, mask);

txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);

	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, TXGBE_VLANCTL, vlanctrl);

txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
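/*
 * Illustrative numbers (assuming TXGBE_MAX_RX_QUEUE_NUM is 128): with
 * SRIOV active in 64-pool mode, nb_q_per_pool = 128 / 64 = 2, and with
 * e.g. 60 VFs the PF's default pool queues start at index 60 * 2 = 120.
 */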
txgbe_check_mq_mode(struct rte_eth_dev *dev)
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
				     " unsupported mq_mode rx %d.",
				     dev_conf->rxmode.mq_mode);
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						     " invalid queue number"
						     " for VMDQ RSS; allowed"
						     " values are 1, 2 or 4.");
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if no mq mode was configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				ETH_MQ_RX_VMDQ_ONLY;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
			/* SRIOV only works in VMDq enabled mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
				     " wrong mq_mode rx %d.",
				     dev_conf->rxmode.mq_mode);

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				ETH_MQ_TX_VMDQ_ONLY;

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
				     " nb_rx_q=%d nb_tx_q=%d queue number"
				     " must be less than or equal to %d.",
				     RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);

		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"

		/* check configuration for VMDq+DCB mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
					     TXGBE_VMDQ_DCB_NB_QUEUES);
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
					     " nb_queue_pools must be %d or %d.",
					     ETH_16_POOLS, ETH_32_POOLS);

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
					     TXGBE_VMDQ_DCB_NB_QUEUES);
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
					     " nb_queue_pools != %d and"
					     " nb_queue_pools != %d.",
					     ETH_16_POOLS, ETH_32_POOLS);

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
					     " and nb_tcs != %d.",
					     ETH_4_TCS, ETH_8_TCS);

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
					     " and nb_tcs != %d.",
					     ETH_4_TCS, ETH_8_TCS);
txgbe_dev_configure(struct rte_eth_dev *dev)
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue fails to meet the bulk
	 * allocation preconditions, it will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
	struct txgbe_hw *hw;
	struct txgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);

	if (vf >= pci_dev->max_vfs)

	if (tx_rate > link.link_speed)

	hw = TXGBE_DEV_HW(dev);
	vfinfo = *(TXGBE_DEV_VFDATA(dev));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	if (queue_end >= hw->mac.max_tx_queues)

	for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
		for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
			total_rate += vfinfo[vf_idx].tx_rate[idx];

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;

	if (total_rate > dev->data->dev_link.link_speed) {
		/* Reset the stored tx_rate of the VF if it would exceed the
		 * link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));

	/* Set ARBTXRATE of each queue/pool for vf X */
	for (; queue_idx <= queue_end; queue_idx++) {
			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
txgbe_dev_start(struct rte_eth_dev *dev)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	bool link_up = false, negotiate = 0;
	uint32_t allowed_speeds = 0;
	uint32_t *link_speeds;
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	PMD_INIT_FUNC_TRACE();

	/* TXGBE devices don't support:
	 * - half duplex (checked afterwards for valid speeds)
	 * - fixed speed: TODO implement
	 */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
			     "Invalid link_speeds for port %u; fixed speed is not supported",
			     dev->data->port_id);

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	hw->adapter_stopped = 0;

	/* reinitialize adapter
	 * this calls reset and start
	 */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = txgbe_pf_reset_hw(hw);
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	txgbe_pf_host_configure(dev);

	txgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);

	/* configure MSI-X for sleep until Rx interrupt */
	txgbe_configure_msix(dev);
	/* initialize transmission unit */
	txgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbe_dev_rx_init(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	err = txgbe_vlan_offload_config(dev, mask);
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		txgbe_vmdq_vlan_hw_filter_enable(dev);

	/* Configure DCB hw */
	txgbe_configure_pb(dev);
	txgbe_configure_port(dev);
	txgbe_configure_dcb(dev);

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		err = txgbe_fdir_configure(dev);

	/* Restore VF rate limits */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					txgbe_set_vf_rate_limit(dev, vf,
							vfinfo[vf].tx_rate[idx],

	err = txgbe_dev_rxtx_start(dev);
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");

	/* Skip link setup if loopback mode is enabled. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.setup_sfp(hw);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	dev->data->dev_link.link_status = link_up;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);

	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");

	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = (TXGBE_LINK_SPEED_100M_FULL |
			 TXGBE_LINK_SPEED_1GB_FULL |
			 TXGBE_LINK_SPEED_10GB_FULL);
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= TXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= TXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= TXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= TXGBE_LINK_SPEED_100M_FULL;
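		/*
		 * Illustrative mapping (editorial): e.g. link_speeds =
		 * ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G requests
		 * TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL,
		 * letting autonegotiation pick the fastest speed both ends
		 * support from that set.
		 */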
	err = hw->mac.setup_link(hw, speed, link_up);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
		txgbe_dev_macsec_interrupt_setup(dev);
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
		rte_intr_callback_unregister(intr_handle,
					     txgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "LSC won't be enabled because"
				     " no interrupt multiplexing is available");

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		txgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	txgbe_enable_intr(dev);
	txgbe_l2_tunnel_conf(dev);
	txgbe_filter_restore(dev);

	if (tm_conf->root && !tm_conf->committed)
		PMD_DRV_LOG(WARNING,
			    "please call hierarchy_commit() "
			    "before starting the port");

	/*
	 * Update link status right before returning, because it may
	 * start the link configuration process in a separate thread.
	 */
	txgbe_dev_link_update(dev, 0);

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	txgbe_dev_clear_queues(dev);
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
txgbe_dev_stop(struct rte_eth_dev *dev)
	struct rte_eth_link link;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	if (hw->adapter_stopped)

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	txgbe_disable_intr(hw);

	txgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);

	txgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* revert to the default handler */
		rte_intr_callback_register(intr_handle,
					   txgbe_dev_interrupt_handler,

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;

	/* reset hierarchy commit */
	tm_conf->committed = false;

	adapter->rss_reta_updated = 0;
	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;
/*
 * Set device link up: enable tx.
 */
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);

/*
 * Set device link down: disable tx.
 */
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
1907 txgbe_dev_close(struct rte_eth_dev *dev)
1909 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1910 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1911 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1915 PMD_INIT_FUNC_TRACE();
1917 txgbe_pf_reset_hw(hw);
1919 ret = txgbe_dev_stop(dev);
1921 txgbe_dev_free_queues(dev);
1923 /* reprogram the RAR[0] in case user changed it. */
1924 txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1926 /* Unlock any pending hardware semaphore */
1927 txgbe_swfw_lock_reset(hw);
1929 /* disable uio intr before callback unregister */
1930 rte_intr_disable(intr_handle);
1933 ret = rte_intr_callback_unregister(intr_handle,
1934 txgbe_dev_interrupt_handler, dev);
1935 if (ret >= 0 || ret == -ENOENT) {
1937 } else if (ret != -EAGAIN) {
1939 "intr callback unregister failed: %d",
1943 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1945 /* cancel the delay handler before remove dev */
1946 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1948 /* uninitialize PF if max_vfs not zero */
1949 txgbe_pf_host_uninit(dev);
1951 rte_free(dev->data->mac_addrs);
1952 dev->data->mac_addrs = NULL;
1954 rte_free(dev->data->hash_mac_addrs);
1955 dev->data->hash_mac_addrs = NULL;
1957 /* remove all the fdir filters & hash */
1958 txgbe_fdir_filter_uninit(dev);
1960 /* remove all the L2 tunnel filters & hash */
1961 txgbe_l2_tn_filter_uninit(dev);
1963 /* Remove all ntuple filters of the device */
1964 txgbe_ntuple_filter_uninit(dev);
1966 /* clear all the filters list */
1967 txgbe_filterlist_flush();
1969 /* Remove all Traffic Manager configuration */
1970 txgbe_tm_conf_uninit(dev);
1972 #ifdef RTE_LIB_SECURITY
1973 rte_free(dev->security_ctx);
txgbe_dev_reset(struct rte_eth_dev *dev)
	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * its VFs so that they align with it. The detailed notification
	 * mechanism is PMD specific. As for the txgbe PF, it is rather complex.
	 * To avoid unexpected behavior in VFs, currently reset of a PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)

	ret = eth_txgbe_dev_uninit(dev);

	ret = eth_txgbe_dev_init(dev, NULL);
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
	uint32_t current_counter = rd32(hw, reg); \
	if (current_counter < last_counter) \
		current_counter += 0x100000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFLL; \

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
	uint64_t current_counter_msb = rd32(hw, reg_msb); \
	uint64_t current_counter = (current_counter_msb << 32) | \
		current_counter_lsb; \
	if (current_counter < last_counter) \
		current_counter += 0x1000000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFFLL; \
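/*
 * Editorial example of the wrap handling above: the 36-bit byte counters
 * wrap at 2^36 (0x1000000000). If last_counter was near the top of the
 * range and the register has since wrapped, current_counter < last_counter
 * triggers the += 0x1000000000LL correction, so the delta is computed
 * across the wrap instead of going negative; the final mask keeps the
 * result within the counter's width.
 */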
txgbe_read_stats_registers(struct txgbe_hw *hw,
			   struct txgbe_hw_stats *hw_stats)
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
					hw->qp_last[i].rx_qp_packets,
					hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
					hw->qp_last[i].rx_qp_bytes,
					hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
					hw->qp_last[i].rx_qp_mc_packets,
					hw_stats->qp[i].rx_qp_mc_packets);

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
					hw->qp_last[i].tx_qp_packets,
					hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
					hw->qp_last[i].tx_qp_bytes,
					hw_stats->qp[i].tx_qp_bytes);

	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->up[i].rx_up_xon_packets +=
				rd32(hw, TXGBE_PBRXUPXON(i));
		hw_stats->up[i].rx_up_xoff_packets +=
				rd32(hw, TXGBE_PBRXUPXOFF(i));
		hw_stats->up[i].tx_up_xon_packets +=
				rd32(hw, TXGBE_PBTXUPXON(i));
		hw_stats->up[i].tx_up_xoff_packets +=
				rd32(hw, TXGBE_PBTXUPXOFF(i));
		hw_stats->up[i].tx_up_xon2off_packets +=
				rd32(hw, TXGBE_PBTXUPOFF(i));
		hw_stats->up[i].rx_up_dropped +=
				rd32(hw, TXGBE_PBRXMISS(i));
	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
	hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP);
	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);

	/* FCoE Stats */
	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);

	/* Flow Director Stats */
	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
	hw_stats->flow_director_added_filters +=
			TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_removed_filters +=
			TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_filter_add_errors +=
			TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
	hw_stats->flow_director_filter_remove_errors +=
			TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, TXGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, TXGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, TXGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, TXGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, TXGBE_LSECRX_BADPKT(i));
	hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, TXGBE_LSECRX_INVSAPKT);
	hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, TXGBE_LSECRX_BADSAPKT);

	hw_stats->rx_total_missed_packets = 0;
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->rx_total_missed_packets +=
			hw_stats->up[i].rx_up_dropped;

txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_stat_mappings *stat_mappings =
			TXGBE_DEV_STAT_MAPPINGS(dev);

	txgbe_read_stats_registers(hw, hw_stats);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
2211 for (i = 0; i < TXGBE_MAX_QP; i++) {
2212 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2213 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
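/* Illustrative example, assuming NB_QMAP_FIELDS_PER_QSM_REG == 4: each
* 32-bit QSM register packs four 8-bit mapping fields, so queue 5 uses
* stat_mappings->rqsm[1] (or tqsm[1]) at bit offset (5 % 4) * 8 = 8.
*/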
2216 q_map = (stat_mappings->rqsm[n] >> offset)
2217 & QMAP_FIELD_RESERVED_BITS_MASK;
2218 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2219 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2220 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2221 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2223 q_map = (stat_mappings->tqsm[n] >> offset)
2224 & QMAP_FIELD_RESERVED_BITS_MASK;
2225 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2226 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2227 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2228 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2232 stats->imissed = hw_stats->rx_total_missed_packets +
2233 hw_stats->rx_dma_drop;
2234 stats->ierrors = hw_stats->rx_crc_errors +
2235 hw_stats->rx_mac_short_packet_dropped +
2236 hw_stats->rx_length_errors +
2237 hw_stats->rx_undersize_errors +
2238 hw_stats->rx_oversize_errors +
2239 hw_stats->rx_drop_packets +
2240 hw_stats->rx_illegal_byte_errors +
2241 hw_stats->rx_error_bytes +
2242 hw_stats->rx_fragment_errors +
2243 hw_stats->rx_fcoe_crc_errors +
2244 hw_stats->rx_fcoe_mbuf_allocation_errors;
2252 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2254 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2255 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2257 /* HW registers are cleared on read */
2258 hw->offset_loaded = 0;
2259 txgbe_dev_stats_get(dev, NULL);
2260 hw->offset_loaded = 1;
2262 /* Reset software totals */
2263 memset(hw_stats, 0, sizeof(*hw_stats));
2268 /* This function calculates the number of xstats based on the current config */
2270 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2272 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2273 return TXGBE_NB_HW_STATS +
2274 TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2275 TXGBE_NB_QP_STATS * nb_queues;
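/* Note (derived from the decoding order in txgbe_get_name_by_id() below):
* the xstats id space is laid out as [0, TXGBE_NB_HW_STATS) for hardware
* stats, then TXGBE_NB_UP_STATS * TXGBE_MAX_UP per-priority ids, then
* TXGBE_NB_QP_STATS per-queue ids for each of the nb_queues queues.
*/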
2279 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2283 /* Extended stats from txgbe_hw_stats */
2284 if (id < TXGBE_NB_HW_STATS) {
2285 snprintf(name, size, "[hw]%s",
2286 rte_txgbe_stats_strings[id].name);
2289 id -= TXGBE_NB_HW_STATS;
2291 /* Priority Stats */
2292 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2293 nb = id / TXGBE_NB_UP_STATS;
2294 st = id % TXGBE_NB_UP_STATS;
2295 snprintf(name, size, "[p%u]%s", nb,
2296 rte_txgbe_up_strings[st].name);
2299 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2302 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2303 nb = id / TXGBE_NB_QP_STATS;
2304 st = id % TXGBE_NB_QP_STATS;
2305 snprintf(name, size, "[q%u]%s", nb,
2306 rte_txgbe_qp_strings[st].name);
2309 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2311 return -(int)(id + 1);
2315 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2319 /* Extended stats from txgbe_hw_stats */
2320 if (id < TXGBE_NB_HW_STATS) {
2321 *offset = rte_txgbe_stats_strings[id].offset;
2324 id -= TXGBE_NB_HW_STATS;
2326 /* Priority Stats */
2327 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2328 nb = id / TXGBE_NB_UP_STATS;
2329 st = id % TXGBE_NB_UP_STATS;
2330 *offset = rte_txgbe_up_strings[st].offset +
2331 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
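/* Example of the offset math above: id 2 * TXGBE_NB_UP_STATS + 1 decodes
* to priority 2, stat 1, i.e. rte_txgbe_up_strings[1].offset plus two
* whole per-priority blocks of TXGBE_NB_UP_STATS * sizeof(uint64_t) bytes.
*/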
2334 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2337 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2338 nb = id / TXGBE_NB_QP_STATS;
2339 st = id % TXGBE_NB_QP_STATS;
2340 *offset = rte_txgbe_qp_strings[st].offset +
2341 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2348 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2349 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2351 unsigned int i, count;
2353 count = txgbe_xstats_calc_num(dev);
2354 if (xstats_names == NULL)
2357 /* Note: limit >= cnt_stats checked upstream
2358 * in rte_eth_xstats_names()
2360 limit = min(limit, count);
2362 /* Extended stats from txgbe_hw_stats */
2363 for (i = 0; i < limit; i++) {
2364 if (txgbe_get_name_by_id(i, xstats_names[i].name,
2365 sizeof(xstats_names[i].name))) {
2366 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2374 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2375 struct rte_eth_xstat_name *xstats_names,
2376 const uint64_t *ids,
2382 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2384 for (i = 0; i < limit; i++) {
2385 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2386 sizeof(xstats_names[i].name))) {
2387 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2396 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2399 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2400 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2401 unsigned int i, count;
2403 txgbe_read_stats_registers(hw, hw_stats);
2405 /* If this is a reset, xstats is NULL and we have already cleared
2406 * the registers by reading them.
2408 count = txgbe_xstats_calc_num(dev);
2412 limit = min(limit, txgbe_xstats_calc_num(dev));
2414 /* Extended stats from txgbe_hw_stats */
2415 for (i = 0; i < limit; i++) {
2416 uint32_t offset = 0;
2418 if (txgbe_get_offset_by_id(i, &offset)) {
2419 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2422 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2430 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2433 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2434 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2435 unsigned int i, count;
2437 txgbe_read_stats_registers(hw, hw_stats);
2439 /* If this is a reset, xstats is NULL and we have already cleared
2440 * the registers by reading them.
2442 count = txgbe_xstats_calc_num(dev);
2446 limit = min(limit, txgbe_xstats_calc_num(dev));
2448 /* Extended stats from txgbe_hw_stats */
2449 for (i = 0; i < limit; i++) {
2452 if (txgbe_get_offset_by_id(i, &offset)) {
2453 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2456 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2463 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2464 uint64_t *values, unsigned int limit)
2466 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2470 return txgbe_dev_xstats_get_(dev, values, limit);
2472 for (i = 0; i < limit; i++) {
2475 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2476 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2479 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2486 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2488 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2489 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2491 /* HW registers are cleared on read */
2492 hw->offset_loaded = 0;
2493 txgbe_read_stats_registers(hw, hw_stats);
2494 hw->offset_loaded = 1;
2496 /* Reset software totals */
2497 memset(hw_stats, 0, sizeof(*hw_stats));
2503 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2505 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2506 u16 eeprom_verh, eeprom_verl;
2510 hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
2511 hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
2513 etrack_id = (eeprom_verh << 16) | eeprom_verl;
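/* Example: eeprom_verh = 0x0001 and eeprom_verl = 0x0023 combine into
* etrack_id 0x00010023, reported below as the string "0x00010023".
*/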
2514 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2516 ret += 1; /* add the size of '\0' */
2517 if (fw_size < (u32)ret)
2524 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2526 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2527 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2529 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2530 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2531 dev_info->min_rx_bufsize = 1024;
2532 dev_info->max_rx_pktlen = 15872;
2533 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2534 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2535 dev_info->max_vfs = pci_dev->max_vfs;
2536 dev_info->max_vmdq_pools = ETH_64_POOLS;
2537 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2538 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2539 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2540 dev_info->rx_queue_offload_capa);
2541 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2542 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2544 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2546 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2547 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2548 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2550 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2555 dev_info->default_txconf = (struct rte_eth_txconf) {
2557 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2558 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2559 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2561 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2565 dev_info->rx_desc_lim = rx_desc_lim;
2566 dev_info->tx_desc_lim = tx_desc_lim;
2568 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2569 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2570 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2572 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2573 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2575 /* Driver-preferred Rx/Tx parameters */
2576 dev_info->default_rxportconf.burst_size = 32;
2577 dev_info->default_txportconf.burst_size = 32;
2578 dev_info->default_rxportconf.nb_queues = 1;
2579 dev_info->default_txportconf.nb_queues = 1;
2580 dev_info->default_rxportconf.ring_size = 256;
2581 dev_info->default_txportconf.ring_size = 256;
2587 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2589 if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2590 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2591 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2592 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2593 return txgbe_get_supported_ptypes();
2599 txgbe_dev_setup_link_alarm_handler(void *param)
2601 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2602 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2603 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2605 bool autoneg = false;
2607 speed = hw->phy.autoneg_advertised;
2609 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2611 hw->mac.setup_link(hw, speed, true);
2613 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2616 /* return 0 when link status changed, -1 when not changed */
2618 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2619 int wait_to_complete)
2621 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2622 struct rte_eth_link link;
2623 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2624 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2629 memset(&link, 0, sizeof(link));
2630 link.link_status = ETH_LINK_DOWN;
2631 link.link_speed = ETH_SPEED_NUM_NONE;
2632 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2633 link.link_autoneg = ETH_LINK_AUTONEG;
2635 hw->mac.get_link_status = true;
2637 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2638 return rte_eth_linkstatus_set(dev, &link);
2640 /* don't wait for completion if not requested, or if the LSC interrupt is enabled */
2641 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2642 wait = 0;
2644 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2647 link.link_speed = ETH_SPEED_NUM_100M;
2648 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2649 return rte_eth_linkstatus_set(dev, &link);
2653 if (hw->phy.media_type == txgbe_media_type_fiber) {
2654 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2655 rte_eal_alarm_set(10,
2656 txgbe_dev_setup_link_alarm_handler, dev);
2658 return rte_eth_linkstatus_set(dev, &link);
2661 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2662 link.link_status = ETH_LINK_UP;
2663 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2665 switch (link_speed) {
2667 case TXGBE_LINK_SPEED_UNKNOWN:
2668 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2669 link.link_speed = ETH_SPEED_NUM_100M;
2672 case TXGBE_LINK_SPEED_100M_FULL:
2673 link.link_speed = ETH_SPEED_NUM_100M;
2676 case TXGBE_LINK_SPEED_1GB_FULL:
2677 link.link_speed = ETH_SPEED_NUM_1G;
2680 case TXGBE_LINK_SPEED_2_5GB_FULL:
2681 link.link_speed = ETH_SPEED_NUM_2_5G;
2684 case TXGBE_LINK_SPEED_5GB_FULL:
2685 link.link_speed = ETH_SPEED_NUM_5G;
2688 case TXGBE_LINK_SPEED_10GB_FULL:
2689 link.link_speed = ETH_SPEED_NUM_10G;
2693 return rte_eth_linkstatus_set(dev, &link);
2697 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2699 return txgbe_dev_link_update_share(dev, wait_to_complete);
2703 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2705 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2708 fctrl = rd32(hw, TXGBE_PSRCTL);
2709 fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2710 wr32(hw, TXGBE_PSRCTL, fctrl);
2716 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2718 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2721 fctrl = rd32(hw, TXGBE_PSRCTL);
2722 fctrl &= (~TXGBE_PSRCTL_UCP);
2723 if (dev->data->all_multicast == 1)
2724 fctrl |= TXGBE_PSRCTL_MCP;
2726 fctrl &= (~TXGBE_PSRCTL_MCP);
2727 wr32(hw, TXGBE_PSRCTL, fctrl);
2733 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2735 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2738 fctrl = rd32(hw, TXGBE_PSRCTL);
2739 fctrl |= TXGBE_PSRCTL_MCP;
2740 wr32(hw, TXGBE_PSRCTL, fctrl);
2746 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2748 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2751 if (dev->data->promiscuous == 1)
2752 return 0; /* must remain in all_multicast mode */
2754 fctrl = rd32(hw, TXGBE_PSRCTL);
2755 fctrl &= (~TXGBE_PSRCTL_MCP);
2756 wr32(hw, TXGBE_PSRCTL, fctrl);
2762 * It clears the interrupt causes and enables the interrupt.
2763 * It will be called only once, during NIC initialization.
2765 * @param dev
2766 * Pointer to struct rte_eth_dev.
2767 * @param on
2768 * Enable or Disable.
2770 * @return
2771 * - On success, zero.
2772 * - On failure, a negative value.
2775 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2777 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2779 txgbe_dev_link_status_print(dev);
2781 intr->mask_misc |= TXGBE_ICRMISC_LSC;
2783 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2789 * It clears the interrupt causes and enables the interrupt.
2790 * It will be called only once, during NIC initialization.
2792 * @param dev
2793 * Pointer to struct rte_eth_dev.
2795 * @return
2796 * - On success, zero.
2797 * - On failure, a negative value.
2800 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2802 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2804 intr->mask[0] |= TXGBE_ICR_MASK;
2805 intr->mask[1] |= TXGBE_ICR_MASK;
2811 * It clears the interrupt causes and enables the interrupt.
2812 * It will be called only once, during NIC initialization.
2814 * @param dev
2815 * Pointer to struct rte_eth_dev.
2817 * @return
2818 * - On success, zero.
2819 * - On failure, a negative value.
2822 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2824 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2826 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2832 * It reads the ICR and sets the flag (TXGBE_ICRMISC_LSC) for link_update.
2834 * @param dev
2835 * Pointer to struct rte_eth_dev.
2837 * @return
2838 * - On success, zero.
2839 * - On failure, a negative value.
2842 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2845 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2846 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2848 /* clear all cause mask */
2849 txgbe_disable_intr(hw);
2851 /* read-on-clear nic registers here */
2852 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2853 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
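/* Note: the misc cause word is fetched from the interrupt status block
* (hw->isb_mem), which the device presumably DMAs into host memory, so
* this is a plain memory read rather than a register access.
*/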
2857 /* set flag for async link update */
2858 if (eicr & TXGBE_ICRMISC_LSC)
2859 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2861 if (eicr & TXGBE_ICRMISC_VFMBX)
2862 intr->flags |= TXGBE_FLAG_MAILBOX;
2864 if (eicr & TXGBE_ICRMISC_LNKSEC)
2865 intr->flags |= TXGBE_FLAG_MACSEC;
2867 if (eicr & TXGBE_ICRMISC_GPIO)
2868 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2874 * It gets and then prints the link status.
2876 * @param dev
2877 * Pointer to struct rte_eth_dev.
2879 * @return
2880 * - On success, zero.
2881 * - On failure, a negative value.
2884 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2886 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2887 struct rte_eth_link link;
2889 rte_eth_linkstatus_get(dev, &link);
2891 if (link.link_status) {
2892 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2893 (int)(dev->data->port_id),
2894 (unsigned int)link.link_speed,
2895 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2896 "full-duplex" : "half-duplex");
2898 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2899 (int)(dev->data->port_id));
2901 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2902 pci_dev->addr.domain,
2904 pci_dev->addr.devid,
2905 pci_dev->addr.function);
2909 * It executes link_update after an interrupt has occurred.
2911 * @param dev
2912 * Pointer to struct rte_eth_dev.
2914 * @return
2915 * - On success, zero.
2916 * - On failure, a negative value.
2919 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2920 struct rte_intr_handle *intr_handle)
2922 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2924 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2926 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2928 if (intr->flags & TXGBE_FLAG_MAILBOX) {
2929 txgbe_pf_mbx_process(dev);
2930 intr->flags &= ~TXGBE_FLAG_MAILBOX;
2933 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2934 hw->phy.handle_lasi(hw);
2935 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2938 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2939 struct rte_eth_link link;
2941 /* get the link status before link update, to predict the change later */
2942 rte_eth_linkstatus_get(dev, &link);
2944 txgbe_dev_link_update(dev, 0);
2947 if (!link.link_status)
2948 /* handle it 1 sec later, waiting for it to become stable */
2949 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2950 /* link is likely to go down */
2951 else
2952 /* handle it 4 sec later, waiting for it to become stable */
2953 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2955 txgbe_dev_link_status_print(dev);
2956 if (rte_eal_alarm_set(timeout * 1000,
2957 txgbe_dev_interrupt_delayed_handler,
2959 PMD_DRV_LOG(ERR, "Error setting alarm");
2961 /* remember original mask */
2962 intr->mask_misc_orig = intr->mask_misc;
2963 /* only disable lsc interrupt */
2964 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2968 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2969 txgbe_enable_intr(dev);
2970 rte_intr_enable(intr_handle);
2976 * Interrupt handler which shall be registered as an alarm callback for
2977 * delayed handling of a specific interrupt, to wait for a stable NIC state.
2978 * Because the txgbe interrupt state is not stable right after the link
2979 * goes down, it needs to wait 4 seconds to get a stable status.
2981 * @param handle
2982 * Pointer to interrupt handle.
2983 * @param param
2984 * The address of the parameter (struct rte_eth_dev *) registered before.
2990 txgbe_dev_interrupt_delayed_handler(void *param)
2992 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2993 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2994 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2995 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2996 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2999 txgbe_disable_intr(hw);
3001 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
3002 if (eicr & TXGBE_ICRMISC_VFMBX)
3003 txgbe_pf_mbx_process(dev);
3005 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3006 hw->phy.handle_lasi(hw);
3007 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3010 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3011 txgbe_dev_link_update(dev, 0);
3012 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
3013 txgbe_dev_link_status_print(dev);
3014 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
3018 if (intr->flags & TXGBE_FLAG_MACSEC) {
3019 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
3021 intr->flags &= ~TXGBE_FLAG_MACSEC;
3024 /* restore original mask */
3025 intr->mask_misc = intr->mask_misc_orig;
3026 intr->mask_misc_orig = 0;
3028 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3029 txgbe_enable_intr(dev);
3030 rte_intr_enable(intr_handle);
3034 * Interrupt handler triggered by the NIC to handle a
3035 * specific interrupt.
3037 * @param handle
3038 * Pointer to interrupt handle.
3039 * @param param
3040 * The address of the parameter (struct rte_eth_dev *) registered before.
3046 txgbe_dev_interrupt_handler(void *param)
3048 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3050 txgbe_dev_interrupt_get_status(dev);
3051 txgbe_dev_interrupt_action(dev, dev->intr_handle);
3055 txgbe_dev_led_on(struct rte_eth_dev *dev)
3057 struct txgbe_hw *hw;
3059 hw = TXGBE_DEV_HW(dev);
3060 return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
3064 txgbe_dev_led_off(struct rte_eth_dev *dev)
3066 struct txgbe_hw *hw;
3068 hw = TXGBE_DEV_HW(dev);
3069 return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
3073 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3075 struct txgbe_hw *hw;
3081 hw = TXGBE_DEV_HW(dev);
3083 fc_conf->pause_time = hw->fc.pause_time;
3084 fc_conf->high_water = hw->fc.high_water[0];
3085 fc_conf->low_water = hw->fc.low_water[0];
3086 fc_conf->send_xon = hw->fc.send_xon;
3087 fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3090 * Return rx_pause status according to actual setting of
3093 mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
3094 if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
3100 * Return tx_pause status according to actual setting of
3103 fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
3104 if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
3109 if (rx_pause && tx_pause)
3110 fc_conf->mode = RTE_FC_FULL;
3112 fc_conf->mode = RTE_FC_RX_PAUSE;
3114 fc_conf->mode = RTE_FC_TX_PAUSE;
3116 fc_conf->mode = RTE_FC_NONE;
3122 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3124 struct txgbe_hw *hw;
3126 uint32_t rx_buf_size;
3127 uint32_t max_high_water;
3128 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3135 PMD_INIT_FUNC_TRACE();
3137 hw = TXGBE_DEV_HW(dev);
3138 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
3139 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3142 * Reserve at least one Ethernet frame for the watermark;
3143 * high_water/low_water are in kilobytes for txgbe.
3145 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
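/* Example: with a 512 KB packet buffer (rx_buf_size = 0x80000) and
* RTE_ETHER_MAX_LEN = 1518 bytes, max_high_water = (0x80000 - 1518) >> 10
* = 510 KB, so roughly one full frame always stays in reserve.
*/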
3146 if (fc_conf->high_water > max_high_water ||
3147 fc_conf->high_water < fc_conf->low_water) {
3148 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3149 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3153 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
3154 hw->fc.pause_time = fc_conf->pause_time;
3155 hw->fc.high_water[0] = fc_conf->high_water;
3156 hw->fc.low_water[0] = fc_conf->low_water;
3157 hw->fc.send_xon = fc_conf->send_xon;
3158 hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3160 err = txgbe_fc_enable(hw);
3162 /* Not negotiated is not an error case */
3163 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
3164 wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
3165 (fc_conf->mac_ctrl_frame_fwd
3166 ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
3172 PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
3177 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3178 struct rte_eth_pfc_conf *pfc_conf)
3181 uint32_t rx_buf_size;
3182 uint32_t max_high_water;
3184 uint8_t map[TXGBE_DCB_UP_MAX] = { 0 };
3185 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3186 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3188 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3195 PMD_INIT_FUNC_TRACE();
3197 txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3198 tc_num = map[pfc_conf->priority];
3199 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3200 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3202 * Reserve at least one Ethernet frame for the watermark;
3203 * high_water/low_water are in kilobytes for txgbe.
3205 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3206 if (pfc_conf->fc.high_water > max_high_water ||
3207 pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3208 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3209 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3213 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3214 hw->fc.pause_time = pfc_conf->fc.pause_time;
3215 hw->fc.send_xon = pfc_conf->fc.send_xon;
3216 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
3217 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3219 err = txgbe_dcb_pfc_enable(hw, tc_num);
3221 /* Not negotiated is not an error case */
3222 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3225 PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3230 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3231 struct rte_eth_rss_reta_entry64 *reta_conf,
3236 uint16_t idx, shift;
3237 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3238 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3240 PMD_INIT_FUNC_TRACE();
3242 if (!txgbe_rss_update_sp(hw->mac.type)) {
3243 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3248 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3249 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
3250 "(%d) doesn't match the number the hardware can support "
3251 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3255 for (i = 0; i < reta_size; i += 4) {
3256 idx = i / RTE_RETA_GROUP_SIZE;
3257 shift = i % RTE_RETA_GROUP_SIZE;
3258 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3262 reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3263 for (j = 0; j < 4; j++) {
3264 if (RS8(mask, j, 0x1)) {
3265 reta &= ~(MS32(8 * j, 0xFF));
3266 reta |= LS32(reta_conf[idx].reta[shift + j],
3270 wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
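/* Note on the loop above: the 128-entry redirection table is packed four
* 8-bit entries per 32-bit RSSTBL register (register i >> 2), and the
* 4-bit mask nibble decides which of the four bytes get rewritten;
* unmasked bytes keep the value read back from hardware.
*/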
3272 adapter->rss_reta_updated = 1;
3278 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3279 struct rte_eth_rss_reta_entry64 *reta_conf,
3282 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3285 uint16_t idx, shift;
3287 PMD_INIT_FUNC_TRACE();
3289 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3290 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
3291 "(%d) doesn't match the number the hardware can support "
3292 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3296 for (i = 0; i < reta_size; i += 4) {
3297 idx = i / RTE_RETA_GROUP_SIZE;
3298 shift = i % RTE_RETA_GROUP_SIZE;
3299 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3303 reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3304 for (j = 0; j < 4; j++) {
3305 if (RS8(mask, j, 0x1))
3306 reta_conf[idx].reta[shift + j] =
3307 (uint16_t)RS32(reta, 8 * j, 0xFF);
3315 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3316 uint32_t index, uint32_t pool)
3318 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3319 uint32_t enable_addr = 1;
3321 return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3326 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3328 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3330 txgbe_clear_rar(hw, index);
3334 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3336 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3338 txgbe_remove_rar(dev, 0);
3339 txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3345 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3347 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3348 struct rte_eth_dev_info dev_info;
3349 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3350 struct rte_eth_dev_data *dev_data = dev->data;
3353 ret = txgbe_dev_info_get(dev, &dev_info);
3357 /* check that mtu is within the allowed range */
3358 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
3361 /* If device is started, refuse mtu that requires the support of
3362 * scattered packets when this feature has not been enabled before.
3364 if (dev_data->dev_started && !dev_data->scattered_rx &&
3365 (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3366 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3367 PMD_INIT_LOG(ERR, "Stop port first.");
3371 /* update max frame size */
3372 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3375 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3376 TXGBE_FRAME_SIZE_MAX);
3378 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3379 TXGBE_FRMSZ_MAX(frame_size));
3385 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3387 uint32_t vector = 0;
3389 switch (hw->mac.mc_filter_type) {
3390 case 0: /* use bits [47:36] of the address */
3391 vector = ((uc_addr->addr_bytes[4] >> 4) |
3392 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3394 case 1: /* use bits [46:35] of the address */
3395 vector = ((uc_addr->addr_bytes[4] >> 3) |
3396 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3398 case 2: /* use bits [45:34] of the address */
3399 vector = ((uc_addr->addr_bytes[4] >> 2) |
3400 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3402 case 3: /* use bits [43:32] of the address */
3403 vector = ((uc_addr->addr_bytes[4]) |
3404 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3406 default: /* Invalid mc_filter_type */
3410 /* the vector can only be 12 bits wide, or the table boundary would be exceeded */
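/* Worked example, for mc_filter_type 0 (bits [47:36] of the address):
* addr_bytes[4] = 0x34 and addr_bytes[5] = 0x56 give
* vector = (0x34 >> 4) | (0x56 << 4) = 0x563.
*/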
3416 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3417 struct rte_ether_addr *mac_addr, uint8_t on)
3425 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3426 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3428 /* The UTA table only exists on PF hardware */
3429 if (hw->mac.type < txgbe_mac_raptor)
3432 vector = txgbe_uta_vector(hw, mac_addr);
3433 uta_idx = (vector >> 5) & 0x7F;
3434 uta_mask = 0x1UL << (vector & 0x1F);
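/* Continuing the example from txgbe_uta_vector(): vector 0x563 selects
* uta_shadow word (0x563 >> 5) & 0x7F = 43 and bit 0x563 & 0x1F = 3.
*/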
3436 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3439 reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3441 uta_info->uta_in_use++;
3442 reg_val |= uta_mask;
3443 uta_info->uta_shadow[uta_idx] |= uta_mask;
3445 uta_info->uta_in_use--;
3446 reg_val &= ~uta_mask;
3447 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3450 wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3452 psrctl = rd32(hw, TXGBE_PSRCTL);
3453 if (uta_info->uta_in_use > 0)
3454 psrctl |= TXGBE_PSRCTL_UCHFENA;
3456 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3458 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3459 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3460 wr32(hw, TXGBE_PSRCTL, psrctl);
3466 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3468 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3469 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3473 /* The UTA table only exists on PF hardware */
3474 if (hw->mac.type < txgbe_mac_raptor)
3478 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3479 uta_info->uta_shadow[i] = ~0;
3480 wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3483 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3484 uta_info->uta_shadow[i] = 0;
3485 wr32(hw, TXGBE_UCADDRTBL(i), 0);
3489 psrctl = rd32(hw, TXGBE_PSRCTL);
3491 psrctl |= TXGBE_PSRCTL_UCHFENA;
3493 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3495 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3496 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3497 wr32(hw, TXGBE_PSRCTL, psrctl);
3503 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3505 uint32_t new_val = orig_val;
3507 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3508 new_val |= TXGBE_POOLETHCTL_UTA;
3509 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3510 new_val |= TXGBE_POOLETHCTL_MCHA;
3511 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3512 new_val |= TXGBE_POOLETHCTL_UCHA;
3513 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3514 new_val |= TXGBE_POOLETHCTL_BCA;
3515 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3516 new_val |= TXGBE_POOLETHCTL_MCP;
3522 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3524 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3525 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3527 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
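/* Note: queue interrupt mask bits are split across two registers:
* TXGBE_IMS(0) covers queues 0-31, TXGBE_IMS(1) covers queues 32-63,
* so e.g. queue 40 maps to bit 40 - 32 = 8 of TXGBE_IMS(1).
*/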
3529 if (queue_id < 32) {
3530 mask = rd32(hw, TXGBE_IMS(0));
3531 mask &= (1 << queue_id);
3532 wr32(hw, TXGBE_IMS(0), mask);
3533 } else if (queue_id < 64) {
3534 mask = rd32(hw, TXGBE_IMS(1));
3535 mask &= (1 << (queue_id - 32));
3536 wr32(hw, TXGBE_IMS(1), mask);
3538 rte_intr_enable(intr_handle);
3544 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3547 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3549 if (queue_id < 32) {
3550 mask = rd32(hw, TXGBE_IMS(0));
3551 mask &= ~(1 << queue_id);
3552 wr32(hw, TXGBE_IMS(0), mask);
3553 } else if (queue_id < 64) {
3554 mask = rd32(hw, TXGBE_IMS(1));
3555 mask &= ~(1 << (queue_id - 32));
3556 wr32(hw, TXGBE_IMS(1), mask);
3563 * set the IVAR registers, mapping interrupt causes to vectors
3564 * @hw
3565 * pointer to txgbe_hw struct
3566 * @direction
3567 * 0 for Rx, 1 for Tx, -1 for other causes
3568 * @queue
3569 * queue to map the corresponding interrupt to
3570 * @msix_vector
3571 * the vector to map to the corresponding queue
3574 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3575 uint8_t queue, uint8_t msix_vector)
3579 if (direction == -1) {
3581 msix_vector |= TXGBE_IVARMISC_VLD;
3583 tmp = rd32(hw, TXGBE_IVARMISC);
3584 tmp &= ~(0xFF << idx);
3585 tmp |= (msix_vector << idx);
3586 wr32(hw, TXGBE_IVARMISC, tmp);
3588 /* rx or tx causes */
3589 /* Workaround for lost ICR */
3590 idx = ((16 * (queue & 1)) + (8 * direction));
3591 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3592 tmp &= ~(0xFF << idx);
3593 tmp |= (msix_vector << idx);
3594 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
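/* Illustration of the IVAR packing used above: each 32-bit IVAR register
* holds four 8-bit vector entries covering one queue pair (Rx and Tx
* causes), so e.g. queue 3, Rx (direction 0) lands in TXGBE_IVAR(1) at
* bit offset 16 * (3 & 1) + 8 * 0 = 16.
*/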
3599 * Sets up the hardware to properly generate MSI-X interrupts
3600 * @dev
3601 * board private structure
3604 txgbe_configure_msix(struct rte_eth_dev *dev)
3606 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3607 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3608 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3609 uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3610 uint32_t vec = TXGBE_MISC_VEC_ID;
3613 /* won't configure the MSI-X register if no mapping is done
3614 * between intr vector and event fd,
3615 * but if MSI-X has been enabled already, need to configure
3616 * auto clean, auto mask and throttling.
3618 gpie = rd32(hw, TXGBE_GPIE);
3619 if (!rte_intr_dp_is_en(intr_handle) &&
3620 !(gpie & TXGBE_GPIE_MSIX))
3623 if (rte_intr_allow_others(intr_handle)) {
3624 base = TXGBE_RX_VEC_START;
3628 /* setup GPIE for MSI-x mode */
3629 gpie = rd32(hw, TXGBE_GPIE);
3630 gpie |= TXGBE_GPIE_MSIX;
3631 wr32(hw, TXGBE_GPIE, gpie);
3633 /* Populate the IVAR table and set the ITR values to the
3634 * corresponding register.
3636 if (rte_intr_dp_is_en(intr_handle)) {
3637 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3639 /* by default, 1:1 mapping */
3640 txgbe_set_ivar_map(hw, 0, queue_id, vec);
3641 intr_handle->intr_vec[queue_id] = vec;
3642 if (vec < base + intr_handle->nb_efd - 1)
3646 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3648 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3649 TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3654 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3655 uint16_t queue_idx, uint16_t tx_rate)
3657 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3660 if (queue_idx >= hw->mac.max_tx_queues)
3664 bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3665 bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3671 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3672 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3674 wr32(hw, TXGBE_ARBTXMMW, 0x14);
3676 /* Set ARBTXRATE of queue X */
3677 wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3678 wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3685 txgbe_syn_filter_set(struct rte_eth_dev *dev,
3686 struct rte_eth_syn_filter *filter,
3689 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3690 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3694 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3697 syn_info = filter_info->syn_info;
3700 if (syn_info & TXGBE_SYNCLS_ENA)
3702 synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3703 synqf |= TXGBE_SYNCLS_ENA;
3705 if (filter->hig_pri)
3706 synqf |= TXGBE_SYNCLS_HIPRIO;
3708 synqf &= ~TXGBE_SYNCLS_HIPRIO;
3710 synqf = rd32(hw, TXGBE_SYNCLS);
3711 if (!(syn_info & TXGBE_SYNCLS_ENA))
3713 synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3716 filter_info->syn_info = synqf;
3717 wr32(hw, TXGBE_SYNCLS, synqf);
3722 static inline enum txgbe_5tuple_protocol
3723 convert_protocol_type(uint8_t protocol_value)
3725 if (protocol_value == IPPROTO_TCP)
3726 return TXGBE_5TF_PROT_TCP;
3727 else if (protocol_value == IPPROTO_UDP)
3728 return TXGBE_5TF_PROT_UDP;
3729 else if (protocol_value == IPPROTO_SCTP)
3730 return TXGBE_5TF_PROT_SCTP;
3732 return TXGBE_5TF_PROT_NONE;
3735 /* inject a 5-tuple filter to HW */
3737 txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
3738 struct txgbe_5tuple_filter *filter)
3740 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3742 uint32_t ftqf, sdpqf;
3743 uint32_t l34timir = 0;
3744 uint32_t mask = TXGBE_5TFCTL0_MASK;
3747 sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
3748 sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
3750 ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
3751 ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
3752 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
3753 mask &= ~TXGBE_5TFCTL0_MSADDR;
3754 if (filter->filter_info.dst_ip_mask == 0)
3755 mask &= ~TXGBE_5TFCTL0_MDADDR;
3756 if (filter->filter_info.src_port_mask == 0)
3757 mask &= ~TXGBE_5TFCTL0_MSPORT;
3758 if (filter->filter_info.dst_port_mask == 0)
3759 mask &= ~TXGBE_5TFCTL0_MDPORT;
3760 if (filter->filter_info.proto_mask == 0)
3761 mask &= ~TXGBE_5TFCTL0_MPROTO;
3763 ftqf |= TXGBE_5TFCTL0_MPOOL;
3764 ftqf |= TXGBE_5TFCTL0_ENA;
3766 wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
3767 wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
3768 wr32(hw, TXGBE_5TFPORT(i), sdpqf);
3769 wr32(hw, TXGBE_5TFCTL0(i), ftqf);
3771 l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
3772 wr32(hw, TXGBE_5TFCTL1(i), l34timir);
3776 * add a 5tuple filter
3779 * dev: Pointer to struct rte_eth_dev.
3780 * index: the index that is allocated to the filter.
3781 * filter: pointer to the filter that will be added.
3782 * rx_queue: the queue id the filter is assigned to.
3785 * - On success, zero.
3786 * - On failure, a negative value.
3789 txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3790 struct txgbe_5tuple_filter *filter)
3792 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3796 * look for an unused 5tuple filter index,
3797 * and insert the filter into the list.
3799 for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
3800 idx = i / (sizeof(uint32_t) * NBBY);
3801 shift = i % (sizeof(uint32_t) * NBBY);
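/* Example: filter index 37 maps to fivetuple_mask word 37 / 32 = 1,
* bit 37 % 32 = 5 (with NBBY being the usual 8 bits per byte).
*/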
3802 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3803 filter_info->fivetuple_mask[idx] |= 1 << shift;
3805 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3811 if (i >= TXGBE_MAX_FTQF_FILTERS) {
3812 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3816 txgbe_inject_5tuple_filter(dev, filter);
3822 * remove a 5tuple filter
3825 * dev: Pointer to struct rte_eth_dev.
3826 * filter: the pointer of the filter to be removed.
3829 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
3830 struct txgbe_5tuple_filter *filter)
3832 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3833 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3834 uint16_t index = filter->index;
3836 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
3837 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
3838 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3841 wr32(hw, TXGBE_5TFDADDR(index), 0);
3842 wr32(hw, TXGBE_5TFSADDR(index), 0);
3843 wr32(hw, TXGBE_5TFPORT(index), 0);
3844 wr32(hw, TXGBE_5TFCTL0(index), 0);
3845 wr32(hw, TXGBE_5TFCTL1(index), 0);
3848 static inline struct txgbe_5tuple_filter *
3849 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
3850 struct txgbe_5tuple_filter_info *key)
3852 struct txgbe_5tuple_filter *it;
3854 TAILQ_FOREACH(it, filter_list, entries) {
3855 if (memcmp(key, &it->filter_info,
3856 sizeof(struct txgbe_5tuple_filter_info)) == 0) {
3863 /* translate elements in struct rte_eth_ntuple_filter
3864 * to struct txgbe_5tuple_filter_info
3867 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
3868 struct txgbe_5tuple_filter_info *filter_info)
3870 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
3871 filter->priority > TXGBE_5TUPLE_MAX_PRI ||
3872 filter->priority < TXGBE_5TUPLE_MIN_PRI)
3875 switch (filter->dst_ip_mask) {
3877 filter_info->dst_ip_mask = 0;
3878 filter_info->dst_ip = filter->dst_ip;
3881 filter_info->dst_ip_mask = 1;
3884 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3888 switch (filter->src_ip_mask) {
3890 filter_info->src_ip_mask = 0;
3891 filter_info->src_ip = filter->src_ip;
3894 filter_info->src_ip_mask = 1;
3897 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3901 switch (filter->dst_port_mask) {
3903 filter_info->dst_port_mask = 0;
3904 filter_info->dst_port = filter->dst_port;
3907 filter_info->dst_port_mask = 1;
3910 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3914 switch (filter->src_port_mask) {
3916 filter_info->src_port_mask = 0;
3917 filter_info->src_port = filter->src_port;
3920 filter_info->src_port_mask = 1;
3923 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3927 switch (filter->proto_mask) {
3929 filter_info->proto_mask = 0;
3930 filter_info->proto =
3931 convert_protocol_type(filter->proto);
3934 filter_info->proto_mask = 1;
3937 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3941 filter_info->priority = (uint8_t)filter->priority;
3946 * add or delete an ntuple filter
3949 * dev: Pointer to struct rte_eth_dev.
3950 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3951 * add: if true, add the filter; if false, remove it
3954 * - On success, zero.
3955 * - On failure, a negative value.
3958 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
3959 struct rte_eth_ntuple_filter *ntuple_filter,
3962 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3963 struct txgbe_5tuple_filter_info filter_5tuple;
3964 struct txgbe_5tuple_filter *filter;
3967 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
3968 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
3972 memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
3973 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
3977 filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
3979 if (filter != NULL && add) {
3980 PMD_DRV_LOG(ERR, "filter exists.");
3983 if (filter == NULL && !add) {
3984 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3989 filter = rte_zmalloc("txgbe_5tuple_filter",
3990 sizeof(struct txgbe_5tuple_filter), 0);
3993 rte_memcpy(&filter->filter_info,
3995 sizeof(struct txgbe_5tuple_filter_info));
3996 filter->queue = ntuple_filter->queue;
3997 ret = txgbe_add_5tuple_filter(dev, filter);
4003 txgbe_remove_5tuple_filter(dev, filter);
4010 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
4011 struct rte_eth_ethertype_filter *filter,
4014 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4015 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4019 struct txgbe_ethertype_filter ethertype_filter;
4021 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
4024 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
4025 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4026 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4027 " ethertype filter.", filter->ether_type);
4031 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4032 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4035 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4036 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4040 ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4041 if (ret >= 0 && add) {
4042 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4043 filter->ether_type);
4046 if (ret < 0 && !add) {
4047 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4048 filter->ether_type);
4053 etqf = TXGBE_ETFLT_ENA;
4054 etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
4055 etqs |= TXGBE_ETCLS_QPID(filter->queue);
4056 etqs |= TXGBE_ETCLS_QENA;
4058 ethertype_filter.ethertype = filter->ether_type;
4059 ethertype_filter.etqf = etqf;
4060 ethertype_filter.etqs = etqs;
4061 ethertype_filter.conf = FALSE;
4062 ret = txgbe_ethertype_filter_insert(filter_info,
4065 PMD_DRV_LOG(ERR, "ethertype filters are full.");
4069 ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4073 wr32(hw, TXGBE_ETFLT(ret), etqf);
4074 wr32(hw, TXGBE_ETCLS(ret), etqs);
4081 txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
4082 enum rte_filter_type filter_type,
4083 enum rte_filter_op filter_op,
4088 switch (filter_type) {
4089 case RTE_ETH_FILTER_GENERIC:
4090 if (filter_op != RTE_ETH_FILTER_GET)
4092 *(const void **)arg = &txgbe_flow_ops;
4095 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4105 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4106 u8 **mc_addr_ptr, u32 *vmdq)
4111 mc_addr = *mc_addr_ptr;
4112 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4117 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4118 struct rte_ether_addr *mc_addr_set,
4119 uint32_t nb_mc_addr)
4121 struct txgbe_hw *hw;
4124 hw = TXGBE_DEV_HW(dev);
4125 mc_addr_list = (u8 *)mc_addr_set;
4126 return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4127 txgbe_dev_addr_list_itr, TRUE);
4131 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4133 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4134 uint64_t systime_cycles;
4136 systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4137 systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4139 return systime_cycles;
4143 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4145 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4146 uint64_t rx_tstamp_cycles;
4148 /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
4149 rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4150 rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4152 return rx_tstamp_cycles;
4156 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4158 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4159 uint64_t tx_tstamp_cycles;
4161 /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
4162 tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4163 tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4165 return tx_tstamp_cycles;
4169 txgbe_start_timecounters(struct rte_eth_dev *dev)
4171 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4172 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4173 struct rte_eth_link link;
4174 uint32_t incval = 0;
4177 /* Get current link speed. */
4178 txgbe_dev_link_update(dev, 1);
4179 rte_eth_linkstatus_get(dev, &link);
4181 switch (link.link_speed) {
4182 case ETH_SPEED_NUM_100M:
4183 incval = TXGBE_INCVAL_100;
4184 shift = TXGBE_INCVAL_SHIFT_100;
4186 case ETH_SPEED_NUM_1G:
4187 incval = TXGBE_INCVAL_1GB;
4188 shift = TXGBE_INCVAL_SHIFT_1GB;
4190 case ETH_SPEED_NUM_10G:
4192 incval = TXGBE_INCVAL_10GB;
4193 shift = TXGBE_INCVAL_SHIFT_10GB;
4197 wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
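/* Note: the incval/shift pair chosen above is link-speed dependent,
* apparently because the MAC time base ticks at a speed-dependent rate;
* TXGBE_TSTIMEINC_VP() presumably packs the increment value and period,
* and the shift stored below converts accumulated cycles back to ns.
*/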
4199 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4200 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4201 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4203 adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4204 adapter->systime_tc.cc_shift = shift;
4205 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4207 adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4208 adapter->rx_tstamp_tc.cc_shift = shift;
4209 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4211 adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4212 adapter->tx_tstamp_tc.cc_shift = shift;
4213 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4217 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4219 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4221 adapter->systime_tc.nsec += delta;
4222 adapter->rx_tstamp_tc.nsec += delta;
4223 adapter->tx_tstamp_tc.nsec += delta;
4229 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4232 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4234 ns = rte_timespec_to_ns(ts);
4235 /* Set the timecounters to a new value. */
4236 adapter->systime_tc.nsec = ns;
4237 adapter->rx_tstamp_tc.nsec = ns;
4238 adapter->tx_tstamp_tc.nsec = ns;
4244 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4246 uint64_t ns, systime_cycles;
4247 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4249 systime_cycles = txgbe_read_systime_cyclecounter(dev);
4250 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4251 *ts = rte_ns_to_timespec(ns);
4257 txgbe_timesync_enable(struct rte_eth_dev *dev)
4259 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4262 /* Stop the timesync system time. */
4263 wr32(hw, TXGBE_TSTIMEINC, 0x0);
4264 /* Reset the timesync system time value. */
4265 wr32(hw, TXGBE_TSTIMEL, 0x0);
4266 wr32(hw, TXGBE_TSTIMEH, 0x0);
4268 txgbe_start_timecounters(dev);
4270 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4271 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4272 RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4274 /* Enable timestamping of received PTP packets. */
4275 tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4276 tsync_ctl |= TXGBE_TSRXCTL_ENA;
4277 wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4279 /* Enable timestamping of transmitted PTP packets. */
4280 tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4281 tsync_ctl |= TXGBE_TSTXCTL_ENA;
4282 wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4290 txgbe_timesync_disable(struct rte_eth_dev *dev)
4292 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4295 /* Disable timestamping of transmitted PTP packets. */
4296 tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4297 tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
4298 wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4300 /* Disable timestamping of received PTP packets. */
4301 tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4302 tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
4303 wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4305 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4306 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
4308 /* Stop incrementing the System Time registers. */
4309 wr32(hw, TXGBE_TSTIMEINC, 0);
4315 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4316 struct timespec *timestamp,
4317 uint32_t flags __rte_unused)
4319 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4320 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4321 uint32_t tsync_rxctl;
4322 uint64_t rx_tstamp_cycles;
4325 tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4326 if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4329 rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4330 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4331 *timestamp = rte_ns_to_timespec(ns);
4337 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4338 struct timespec *timestamp)
4340 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4341 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4342 uint32_t tsync_txctl;
4343 uint64_t tx_tstamp_cycles;
4346 tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4347 if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4350 tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4351 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4352 *timestamp = rte_ns_to_timespec(ns);
4358 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4362 const struct reg_info *reg_group;
4363 const struct reg_info **reg_set = txgbe_regs_others;
4365 while ((reg_group = reg_set[g_ind++]))
4366 count += txgbe_regs_group_count(reg_group);
4372 txgbe_get_regs(struct rte_eth_dev *dev,
4373 struct rte_dev_reg_info *regs)
4375 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4376 uint32_t *data = regs->data;
4379 const struct reg_info *reg_group;
4380 const struct reg_info **reg_set = txgbe_regs_others;
4383 regs->length = txgbe_get_reg_length(dev);
4384 regs->width = sizeof(uint32_t);
4388 /* Support only full register dump */
4389 if (regs->length == 0 ||
4390 regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
4391 regs->version = hw->mac.type << 24 |
4392 hw->revision_id << 16 |
4394 while ((reg_group = reg_set[g_ind++]))
4395 count += txgbe_read_regs_group(dev, &data[count],
4404 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
4406 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4408 /* Return unit is byte count */
4409 return hw->rom.word_size * 2;
4413 txgbe_get_eeprom(struct rte_eth_dev *dev,
4414 struct rte_dev_eeprom_info *in_eeprom)
4416 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4417 struct txgbe_rom_info *eeprom = &hw->rom;
4418 uint16_t *data = in_eeprom->data;
4421 first = in_eeprom->offset >> 1;
4422 length = in_eeprom->length >> 1;
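/* Example: in_eeprom->offset = 4 and in_eeprom->length = 8 (bytes) become
* word offset 2 and word count 4, since the EEPROM is addressed in 16-bit
* words (txgbe_get_eeprom_length() likewise reports word_size * 2 bytes).
*/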
4423 if (first > hw->rom.word_size ||
4424 ((first + length) > hw->rom.word_size))
4427 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4429 return eeprom->readw_buffer(hw, first, length, data);
4433 txgbe_set_eeprom(struct rte_eth_dev *dev,
4434 struct rte_dev_eeprom_info *in_eeprom)
4436 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4437 struct txgbe_rom_info *eeprom = &hw->rom;
4438 uint16_t *data = in_eeprom->data;
4441 first = in_eeprom->offset >> 1;
4442 length = in_eeprom->length >> 1;
4443 if (first > hw->rom.word_size ||
4444 ((first + length) > hw->rom.word_size))
4447 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4449 return eeprom->writew_buffer(hw, first, length, data);
4453 txgbe_get_module_info(struct rte_eth_dev *dev,
4454 struct rte_eth_dev_module_info *modinfo)
4456 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4458 uint8_t sff8472_rev, addr_mode;
4459 bool page_swap = false;
4461 /* Check whether we support SFF-8472 or not */
4462 status = hw->phy.read_i2c_eeprom(hw,
4463 TXGBE_SFF_SFF_8472_COMP,
4468 /* addressing mode is not supported */
4469 status = hw->phy.read_i2c_eeprom(hw,
4470 TXGBE_SFF_SFF_8472_SWAP,
4475 if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
4477 "Address change required to access page 0xA2, "
4478 "but not supported. Please report the module "
4479 "type to the driver maintainers.");
4483 if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
4484 /* We have an SFP, but it does not support SFF-8472 */
4485 modinfo->type = RTE_ETH_MODULE_SFF_8079;
4486 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
4488 /* We have an SFP that supports a revision of SFF-8472. */
4489 modinfo->type = RTE_ETH_MODULE_SFF_8472;
4490 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
4497 txgbe_get_module_eeprom(struct rte_eth_dev *dev,
4498 struct rte_dev_eeprom_info *info)
4500 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4501 uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
4502 uint8_t databyte = 0xFF;
4503 uint8_t *data = info->data;
4506 if (info->length == 0)
4509 for (i = info->offset; i < info->offset + info->length; i++) {
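/* Note: offsets below RTE_ETH_MODULE_SFF_8079_LEN are read from the
* module's base page (typically A0) via read_i2c_eeprom(); higher
* offsets fall in the SFF-8472 diagnostic area and use
* read_i2c_sff8472() instead.
*/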
4510 if (i < RTE_ETH_MODULE_SFF_8079_LEN)
4511 status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
4513 status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
4518 data[i - info->offset] = databyte;
4525 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
4528 case txgbe_mac_raptor:
4529 case txgbe_mac_raptor_vf:
static int
txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
			struct rte_eth_dcb_info *dcb_info)
{
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
	struct txgbe_dcb_tc_config *tc;
	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
	uint8_t nb_tcs;
	uint8_t i, j;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;

	tc_queue = &dcb_info->tc_queue;
	nb_tcs = dcb_info->nb_tcs;

	if (dcb_config->vt_mode) { /* vt is enabled */
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
			for (j = 0; j < nb_tcs; j++) {
				tc_queue->tc_rxq[0][j].base = j;
				tc_queue->tc_rxq[0][j].nb_queue = 1;
				tc_queue->tc_txq[0][j].base = j;
				tc_queue->tc_txq[0][j].nb_queue = 1;
			}
		} else {
			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
				for (j = 0; j < nb_tcs; j++) {
					tc_queue->tc_rxq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_rxq[i][j].nb_queue = 1;
					tc_queue->tc_txq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_txq[i][j].nb_queue = 1;
				}
			}
		}
	} else { /* vt is disabled */
		struct rte_eth_dcb_rx_conf *rx_conf =
				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
		if (dcb_info->nb_tcs == ETH_4_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 64;
			dcb_info->tc_queue.tc_txq[0][2].base = 96;
			dcb_info->tc_queue.tc_txq[0][3].base = 112;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 32;
			dcb_info->tc_queue.tc_txq[0][2].base = 64;
			dcb_info->tc_queue.tc_txq[0][3].base = 80;
			dcb_info->tc_queue.tc_txq[0][4].base = 96;
			dcb_info->tc_queue.tc_txq[0][5].base = 104;
			dcb_info->tc_queue.tc_txq[0][6].base = 112;
			dcb_info->tc_queue.tc_txq[0][7].base = 120;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
		}
	}
	for (i = 0; i < dcb_info->nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
	}

	return 0;
}
/* Update e-tag ether type */
static int
txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
			    uint16_t ether_type)
{
	uint32_t etag_etype;

	etag_etype = rd32(hw, TXGBE_EXTAG);
	etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
	etag_etype |= ether_type;
	wr32(hw, TXGBE_EXTAG, etag_etype);
	txgbe_flush(hw);

	return 0;
}
/* Enable e-tag tunnel */
static int
txgbe_e_tag_enable(struct txgbe_hw *hw)
{
	uint32_t etag_etype;

	etag_etype = rd32(hw, TXGBE_PORTCTL);
	etag_etype |= TXGBE_PORTCTL_ETAG;
	wr32(hw, TXGBE_PORTCTL, etag_etype);
	txgbe_flush(hw);

	return 0;
}
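/*
 * Delete an E-tag forwarding rule: scan the receive address (RAR) table
 * for a valid E-tag entry matching the tunnel ID and clear it.
 */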
static int
txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
		       struct txgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	rar_entries = hw->mac.num_rar_entries;

	for (i = 1; i < rar_entries; i++) {
		wr32(hw, TXGBE_ETHADDRIDX, i);
		rar_high = rd32(hw, TXGBE_ETHADDRH);
		rar_low  = rd32(hw, TXGBE_ETHADDRL);
		if ((rar_high & TXGBE_ETHADDRH_VLD) &&
		    (rar_high & TXGBE_ETHADDRH_ETAG) &&
		    (TXGBE_ETHADDRL_ETAG(rar_low) ==
		     l2_tunnel->tunnel_id)) {
			wr32(hw, TXGBE_ETHADDRL, 0);
			wr32(hw, TXGBE_ETHADDRH, 0);

			txgbe_clear_vmdq(hw, i, BIT_MASK32);

			return ret;
		}
	}

	return ret;
}
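/*
 * Add an E-tag forwarding rule into the first free RAR entry; index 0 is
 * skipped, since it normally holds the default MAC address.
 */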
static int
txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
		       struct txgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	/* One entry for one tunnel. Try to remove potential existing entry. */
	txgbe_e_tag_filter_del(dev, l2_tunnel);

	rar_entries = hw->mac.num_rar_entries;

	for (i = 1; i < rar_entries; i++) {
		wr32(hw, TXGBE_ETHADDRIDX, i);
		rar_high = rd32(hw, TXGBE_ETHADDRH);
		if (rar_high & TXGBE_ETHADDRH_VLD) {
			continue;
		} else {
			txgbe_set_vmdq(hw, i, l2_tunnel->pool);
			rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
			rar_low = l2_tunnel->tunnel_id;

			wr32(hw, TXGBE_ETHADDRL, rar_low);
			wr32(hw, TXGBE_ETHADDRH, rar_high);

			return ret;
		}
	}

	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
		     " Please remove a rule before adding a new one.");
	return -EINVAL;
}
static inline struct txgbe_l2_tn_filter *
txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
			  struct txgbe_l2_tn_key *key)
{
	int ret;

	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
	if (ret < 0)
		return NULL;

	return l2_tn_info->hash_map[ret];
}
static inline int
txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
			  struct txgbe_l2_tn_filter *l2_tn_filter)
{
	int ret;

	ret = rte_hash_add_key(l2_tn_info->hash_handle,
			       &l2_tn_filter->key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert L2 tunnel filter"
			    " to hash table %d!",
			    ret);
		return ret;
	}

	l2_tn_info->hash_map[ret] = l2_tn_filter;

	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);

	return 0;
}
static inline int
txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
			  struct txgbe_l2_tn_key *key)
{
	int ret;
	struct txgbe_l2_tn_filter *l2_tn_filter;

	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "No such L2 tunnel filter to delete %d!",
			    ret);
		return ret;
	}

	l2_tn_filter = l2_tn_info->hash_map[ret];
	l2_tn_info->hash_map[ret] = NULL;

	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
	rte_free(l2_tn_filter);

	return 0;
}
/* Add l2 tunnel filter */
int
txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel,
			       bool restore)
{
	int ret;
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_key key;
	struct txgbe_l2_tn_filter *node;

	if (!restore) {
		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
		key.tn_id = l2_tunnel->tunnel_id;

		node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
		if (node) {
			PMD_DRV_LOG(ERR,
				    "The L2 tunnel filter already exists!");
			return -EINVAL;
		}

		node = rte_zmalloc("txgbe_l2_tn",
				   sizeof(struct txgbe_l2_tn_filter),
				   0);
		if (!node)
			return -ENOMEM;

		rte_memcpy(&node->key,
				 &key,
				 sizeof(struct txgbe_l2_tn_key));
		node->pool = l2_tunnel->pool;
		ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
		if (ret < 0) {
			rte_free(node);
			return ret;
		}
	}

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	if (!restore && ret < 0)
		(void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);

	return ret;
}
/* Delete l2 tunnel filter */
int
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret;
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_key key;

	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
	key.tn_id = l2_tunnel->tunnel_id;
	ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
	if (ret < 0)
		return ret;

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
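/*
 * Switch the pool select mode to E-tag based forwarding (en = 1) or back
 * to the default mode (en = 0).
 */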
static int
txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
{
	int ret = 0;
	uint32_t ctrl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	ctrl = rd32(hw, TXGBE_POOLCTL);
	ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
	if (en)
		ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
	wr32(hw, TXGBE_POOLCTL, ctrl);

	return ret;
}
/* Add UDP tunneling port */
static int
txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			      struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
		wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_TEREDO:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	txgbe_flush(hw);

	return ret;
}
/* Remove UDP tunneling port */
static int
txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			      struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;
	uint16_t cur_port;

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
		if (cur_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Port %u does not exist.",
					udp_tunnel->udp_port);
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_VXLANPORT, 0);
		wr32(hw, TXGBE_VXLANPORTGPE, 0);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
		if (cur_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Port %u does not exist.",
					udp_tunnel->udp_port);
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_GENEVEPORT, 0);
		break;
	case RTE_TUNNEL_TYPE_TEREDO:
		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
		if (cur_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Port %u does not exist.",
					udp_tunnel->udp_port);
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_TEREDOPORT, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	txgbe_flush(hw);

	return ret;
}
/* restore n-tuple filter */
static inline void
txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	struct txgbe_5tuple_filter *node;

	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
		txgbe_inject_5tuple_filter(dev, node);
	}
}
/* restore ethernet type filter */
static inline void
txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			wr32(hw, TXGBE_ETFLT(i),
			     filter_info->ethertype_filters[i].etqf);
			wr32(hw, TXGBE_ETCLS(i),
			     filter_info->ethertype_filters[i].etqs);
			txgbe_flush(hw);
		}
	}
}
/* restore SYN filter */
static inline void
txgbe_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & TXGBE_SYNCLS_ENA) {
		wr32(hw, TXGBE_SYNCLS, synqf);
		txgbe_flush(hw);
	}
}
/* restore L2 tunnel filter */
static inline void
txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_filter *node;
	struct txgbe_l2_tunnel_conf l2_tn_conf;

	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
		l2_tn_conf.tunnel_id = node->key.tn_id;
		l2_tn_conf.pool = node->pool;
		(void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
	}
}
/* restore rss filter */
static inline void
txgbe_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);

	if (filter_info->rss_info.conf.queue_num)
		txgbe_config_rss_filter(dev,
			&filter_info->rss_info, TRUE);
}
static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
	txgbe_ntuple_filter_restore(dev);
	txgbe_ethertype_filter_restore(dev);
	txgbe_syn_filter_restore(dev);
	txgbe_fdir_filter_restore(dev);
	txgbe_l2_tn_filter_restore(dev);
	txgbe_rss_filter_restore(dev);

	return 0;
}
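/* Apply the stored L2 tunnel configuration (E-tag enable, forwarding
 * mode and ether type) to the hardware.
 */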
static void
txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (l2_tn_info->e_tag_en)
		(void)txgbe_e_tag_enable(hw);

	if (l2_tn_info->e_tag_fwd_en)
		(void)txgbe_e_tag_forwarding_en_dis(dev, 1);

	(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}
/* remove all the n-tuple filters */
void
txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	struct txgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		txgbe_remove_5tuple_filter(dev, p_5tuple);
}
/* remove all the ether type filters */
void
txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i) &&
		    !filter_info->ethertype_filters[i].conf) {
			(void)txgbe_ethertype_filter_remove(filter_info,
							    (uint8_t)i);
			wr32(hw, TXGBE_ETFLT(i), 0);
			wr32(hw, TXGBE_ETCLS(i), 0);
			txgbe_flush(hw);
		}
	}
}
/* remove the SYN filter */
void
txgbe_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);

	if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
		filter_info->syn_info = 0;

		wr32(hw, TXGBE_SYNCLS, 0);
		txgbe_flush(hw);
	}
}
/* remove all the L2 tunnel filters */
int
txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_filter *l2_tn_filter;
	struct txgbe_l2_tunnel_conf l2_tn_conf;
	int ret = 0;

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
		l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
		l2_tn_conf.pool = l2_tn_filter->pool;
		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
		if (ret < 0)
			return ret;
	}

	return 0;
}
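/* txgbe ethdev operations: binds the generic rte_ethdev callbacks to the
 * driver implementations above.
 */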
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure              = txgbe_dev_configure,
	.dev_infos_get              = txgbe_dev_info_get,
	.dev_start                  = txgbe_dev_start,
	.dev_stop                   = txgbe_dev_stop,
	.dev_set_link_up            = txgbe_dev_set_link_up,
	.dev_set_link_down          = txgbe_dev_set_link_down,
	.dev_close                  = txgbe_dev_close,
	.dev_reset                  = txgbe_dev_reset,
	.promiscuous_enable         = txgbe_dev_promiscuous_enable,
	.promiscuous_disable        = txgbe_dev_promiscuous_disable,
	.allmulticast_enable        = txgbe_dev_allmulticast_enable,
	.allmulticast_disable       = txgbe_dev_allmulticast_disable,
	.link_update                = txgbe_dev_link_update,
	.stats_get                  = txgbe_dev_stats_get,
	.xstats_get                 = txgbe_dev_xstats_get,
	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
	.stats_reset                = txgbe_dev_stats_reset,
	.xstats_reset               = txgbe_dev_xstats_reset,
	.xstats_get_names           = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
	.fw_version_get             = txgbe_fw_version_get,
	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
	.mtu_set                    = txgbe_dev_mtu_set,
	.vlan_filter_set            = txgbe_vlan_filter_set,
	.vlan_tpid_set              = txgbe_vlan_tpid_set,
	.vlan_offload_set           = txgbe_vlan_offload_set,
	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
	.rx_queue_start             = txgbe_dev_rx_queue_start,
	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
	.tx_queue_start             = txgbe_dev_tx_queue_start,
	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release           = txgbe_dev_rx_queue_release,
	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
	.tx_queue_release           = txgbe_dev_tx_queue_release,
	.dev_led_on                 = txgbe_dev_led_on,
	.dev_led_off                = txgbe_dev_led_off,
	.flow_ctrl_get              = txgbe_flow_ctrl_get,
	.flow_ctrl_set              = txgbe_flow_ctrl_set,
	.priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
	.mac_addr_add               = txgbe_add_rar,
	.mac_addr_remove            = txgbe_remove_rar,
	.mac_addr_set               = txgbe_set_default_mac_addr,
	.uc_hash_table_set          = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
	.reta_update                = txgbe_dev_rss_reta_update,
	.reta_query                 = txgbe_dev_rss_reta_query,
	.rss_hash_update            = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
	.filter_ctrl                = txgbe_dev_filter_ctrl,
	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
	.rxq_info_get               = txgbe_rxq_info_get,
	.txq_info_get               = txgbe_txq_info_get,
	.timesync_enable            = txgbe_timesync_enable,
	.timesync_disable           = txgbe_timesync_disable,
	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
	.get_reg                    = txgbe_get_regs,
	.get_eeprom_length          = txgbe_get_eeprom_length,
	.get_eeprom                 = txgbe_get_eeprom,
	.set_eeprom                 = txgbe_set_eeprom,
	.get_module_info            = txgbe_get_module_info,
	.get_module_eeprom          = txgbe_get_module_eeprom,
	.get_dcb_info               = txgbe_dev_get_dcb_info,
	.timesync_adjust_time       = txgbe_timesync_adjust_time,
	.timesync_read_time         = txgbe_timesync_read_time,
	.timesync_write_time        = txgbe_timesync_write_time,
	.udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
	.tm_ops_get                 = txgbe_tm_ops_get,
	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif