/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
#include <rte_common.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_alarm.h>
#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
#include "txgbe_regs_group.h"
static const struct reg_info txgbe_regs_general[] = {
    {TXGBE_RST, 1, 1, "TXGBE_RST"},
    {TXGBE_STAT, 1, 1, "TXGBE_STAT"},
    {TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
    {TXGBE_SDP, 1, 1, "TXGBE_SDP"},
    {TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
    {TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
static const struct reg_info txgbe_regs_nvm[] = {
static const struct reg_info txgbe_regs_interrupt[] = {
static const struct reg_info txgbe_regs_fctl_others[] = {
static const struct reg_info txgbe_regs_rxdma[] = {
static const struct reg_info txgbe_regs_rx[] = {
static const struct reg_info txgbe_regs_tx[] = {
static const struct reg_info txgbe_regs_wakeup[] = {
static const struct reg_info txgbe_regs_dcb[] = {
static const struct reg_info txgbe_regs_mac[] = {
static const struct reg_info txgbe_regs_diagnostic[] = {
static const struct reg_info *txgbe_regs_others[] = {
    txgbe_regs_fctl_others,
    txgbe_regs_diagnostic,
static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
        int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
        struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);
static int txgbe_filter_restore(struct rte_eth_dev *dev);
static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
#define TXGBE_SET_HWSTRIP(h, q) do {\
    uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
    uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
    (h)->bitmap[idx] |= 1 << bit;\
#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
    uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
    uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
    (h)->bitmap[idx] &= ~(1 << bit);\
#define TXGBE_GET_HWSTRIP(h, q, r) do {\
    uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
    uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
    (r) = (h)->bitmap[idx] >> bit & 1;\
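/*
 * The HWSTRIP macros above index a per-queue bitmap: with 32-bit bitmap
 * words (sizeof((h)->bitmap[0]) * NBBY == 32), queue 37 maps to
 * bitmap[1], bit 5, since 37 / 32 == 1 and 37 % 32 == 5.
 */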
 * The set of PCI devices this driver supports
static const struct rte_pci_id pci_id_txgbe_map[] = {
    { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
    { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
    { .vendor_id = 0, /* sentinel */ },
static const struct rte_eth_desc_lim rx_desc_lim = {
    .nb_max = TXGBE_RING_DESC_MAX,
    .nb_min = TXGBE_RING_DESC_MIN,
    .nb_align = TXGBE_RXD_ALIGN,
static const struct rte_eth_desc_lim tx_desc_lim = {
    .nb_max = TXGBE_RING_DESC_MAX,
    .nb_min = TXGBE_RING_DESC_MIN,
    .nb_align = TXGBE_TXD_ALIGN,
    .nb_seg_max = TXGBE_TX_MAX_SEG,
    .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
static const struct eth_dev_ops txgbe_eth_dev_ops;
#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
    HW_XSTAT(mng_bmc2host_packets),
    HW_XSTAT(mng_host2bmc_packets),
    HW_XSTAT(rx_packets),
    HW_XSTAT(tx_packets),
    HW_XSTAT(rx_total_bytes),
    HW_XSTAT(rx_total_packets),
    HW_XSTAT(tx_total_packets),
    HW_XSTAT(rx_total_missed_packets),
    HW_XSTAT(rx_broadcast_packets),
    HW_XSTAT(rx_multicast_packets),
    HW_XSTAT(rx_management_packets),
    HW_XSTAT(tx_management_packets),
    HW_XSTAT(rx_management_dropped),
    HW_XSTAT(rx_crc_errors),
    HW_XSTAT(rx_illegal_byte_errors),
    HW_XSTAT(rx_error_bytes),
    HW_XSTAT(rx_mac_short_packet_dropped),
    HW_XSTAT(rx_length_errors),
    HW_XSTAT(rx_undersize_errors),
    HW_XSTAT(rx_fragment_errors),
    HW_XSTAT(rx_oversize_errors),
    HW_XSTAT(rx_jabber_errors),
    HW_XSTAT(rx_l3_l4_xsum_error),
    HW_XSTAT(mac_local_errors),
    HW_XSTAT(mac_remote_errors),
    HW_XSTAT(flow_director_added_filters),
    HW_XSTAT(flow_director_removed_filters),
    HW_XSTAT(flow_director_filter_add_errors),
    HW_XSTAT(flow_director_filter_remove_errors),
    HW_XSTAT(flow_director_matched_filters),
    HW_XSTAT(flow_director_missed_filters),
    HW_XSTAT(rx_fcoe_crc_errors),
    HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
    HW_XSTAT(rx_fcoe_dropped),
    HW_XSTAT(rx_fcoe_packets),
    HW_XSTAT(tx_fcoe_packets),
    HW_XSTAT(rx_fcoe_bytes),
    HW_XSTAT(tx_fcoe_bytes),
    HW_XSTAT(rx_fcoe_no_ddp),
    HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
    HW_XSTAT(tx_macsec_pkts_untagged),
    HW_XSTAT(tx_macsec_pkts_encrypted),
    HW_XSTAT(tx_macsec_pkts_protected),
    HW_XSTAT(tx_macsec_octets_encrypted),
    HW_XSTAT(tx_macsec_octets_protected),
    HW_XSTAT(rx_macsec_pkts_untagged),
    HW_XSTAT(rx_macsec_pkts_badtag),
    HW_XSTAT(rx_macsec_pkts_nosci),
    HW_XSTAT(rx_macsec_pkts_unknownsci),
    HW_XSTAT(rx_macsec_octets_decrypted),
    HW_XSTAT(rx_macsec_octets_validated),
    HW_XSTAT(rx_macsec_sc_pkts_unchecked),
    HW_XSTAT(rx_macsec_sc_pkts_delayed),
    HW_XSTAT(rx_macsec_sc_pkts_late),
    HW_XSTAT(rx_macsec_sa_pkts_ok),
    HW_XSTAT(rx_macsec_sa_pkts_invalid),
    HW_XSTAT(rx_macsec_sa_pkts_notvalid),
    HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
    HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
    HW_XSTAT(rx_size_64_packets),
    HW_XSTAT(rx_size_65_to_127_packets),
    HW_XSTAT(rx_size_128_to_255_packets),
    HW_XSTAT(rx_size_256_to_511_packets),
    HW_XSTAT(rx_size_512_to_1023_packets),
    HW_XSTAT(rx_size_1024_to_max_packets),
    HW_XSTAT(tx_size_64_packets),
    HW_XSTAT(tx_size_65_to_127_packets),
    HW_XSTAT(tx_size_128_to_255_packets),
    HW_XSTAT(tx_size_256_to_511_packets),
    HW_XSTAT(tx_size_512_to_1023_packets),
    HW_XSTAT(tx_size_1024_to_max_packets),
    HW_XSTAT(tx_xon_packets),
    HW_XSTAT(rx_xon_packets),
    HW_XSTAT(tx_xoff_packets),
    HW_XSTAT(rx_xoff_packets),
    HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
    HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
    HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
    HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
        sizeof(rte_txgbe_stats_strings[0]))
/* Per-priority statistics */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
    UP_XSTAT(rx_up_packets),
    UP_XSTAT(tx_up_packets),
    UP_XSTAT(rx_up_bytes),
    UP_XSTAT(tx_up_bytes),
    UP_XSTAT(rx_up_drop_packets),
    UP_XSTAT(tx_up_xon_packets),
    UP_XSTAT(rx_up_xon_packets),
    UP_XSTAT(tx_up_xoff_packets),
    UP_XSTAT(rx_up_xoff_packets),
    UP_XSTAT(rx_up_dropped),
    UP_XSTAT(rx_up_mbuf_alloc_errors),
    UP_XSTAT(tx_up_xon2off_packets),
#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
        sizeof(rte_txgbe_up_strings[0]))
/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
    QP_XSTAT(rx_qp_packets),
    QP_XSTAT(tx_qp_packets),
    QP_XSTAT(rx_qp_bytes),
    QP_XSTAT(tx_qp_bytes),
    QP_XSTAT(rx_qp_mc_packets),
#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
        sizeof(rte_txgbe_qp_strings[0]))
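/*
 * Each entry above pairs an xstat name with its byte offset inside
 * struct txgbe_hw_stats; e.g. HW_XSTAT(rx_crc_errors) expands to
 * {"rx_crc_errors", offsetof(struct txgbe_hw_stats, rx_crc_errors)},
 * so a value can be fetched generically as
 * *(uint64_t *)((char *)hw_stats + offset).
 */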
txgbe_is_sfp(struct txgbe_hw *hw)
    switch (hw->phy.type) {
    case txgbe_phy_sfp_avago:
    case txgbe_phy_sfp_ftl:
    case txgbe_phy_sfp_intel:
    case txgbe_phy_sfp_unknown:
    case txgbe_phy_sfp_tyco_passive:
    case txgbe_phy_sfp_unknown_passive:
static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
    status = hw->mac.reset_hw(hw);
    ctrl_ext = rd32(hw, TXGBE_PORTCTL);
    /* Set PF Reset Done bit so PF/VF Mail Ops can work */
    ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
    wr32(hw, TXGBE_PORTCTL, ctrl_ext);
    if (status == TXGBE_ERR_SFP_NOT_PRESENT)
txgbe_enable_intr(struct rte_eth_dev *dev)
    struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    wr32(hw, TXGBE_IENMISC, intr->mask_misc);
    wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
    wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
txgbe_disable_intr(struct txgbe_hw *hw)
    PMD_INIT_FUNC_TRACE();
    wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
    wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
    wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
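/*
 * Note the asymmetry between txgbe_enable_intr() and txgbe_disable_intr():
 * enabling writes the IMC (interrupt mask clear) registers to unmask the
 * vectors, while disabling writes the same bit pattern to IMS (interrupt
 * mask set) to mask them again; IENMISC gates the miscellaneous causes
 * through intr->mask_misc.
 */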
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
    struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
    struct txgbe_stat_mappings *stat_mappings =
        TXGBE_DEV_STAT_MAPPINGS(eth_dev);
    uint32_t qsmr_mask = 0;
    uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
    if (hw->mac.type != txgbe_mac_raptor)
    if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
    PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
        (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
    n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
    if (n >= TXGBE_NB_STAT_MAPPING) {
        PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
    offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
    /* Now clear any previous stat_idx set */
    clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        stat_mappings->tqsm[n] &= ~clearing_mask;
        stat_mappings->rqsm[n] &= ~clearing_mask;
    q_map = (uint32_t)stat_idx;
    q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
    qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        stat_mappings->tqsm[n] |= qsmr_mask;
        stat_mappings->rqsm[n] |= qsmr_mask;
    PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
        (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
    PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
        is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
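/*
 * Each RQSM/TQSM register packs NB_QMAP_FIELDS_PER_QSM_REG fields of
 * QSM_REG_NB_BITS_PER_QMAP_FIELD bits. Assuming the usual four 8-bit
 * fields per 32-bit register, mapping queue_id 10 selects register
 * n = 10 / 4 = 2 and field offset = 10 % 4 = 2, i.e. bits 16..23 of
 * rqsm[2] or tqsm[2].
 */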
txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
    struct txgbe_dcb_tc_config *tc;
    UNREFERENCED_PARAMETER(hw);
    dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
    dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
    bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
    for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
        tc = &dcb_config->tc_config[i];
        tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
        tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
        tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
        tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
        tc->pfc = txgbe_dcb_pfc_disabled;
    /* Initialize default user to priority mapping, UPx->TC0 */
    tc = &dcb_config->tc_config[0];
    tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
    tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
    for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
        dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
        dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
    dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
    dcb_config->pfc_mode_enable = false;
    dcb_config->vt_mode = true;
    dcb_config->round_robin_enable = false;
    /* support all DCB capabilities */
    dcb_config->support.capabilities = 0xFF;
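/*
 * The bwgp + (i & 1) trick above spreads the integer-division remainder
 * across traffic classes: with TXGBE_DCB_TC_MAX == 8, bwgp = 100 / 8 = 12,
 * so the eight TCs get 12/13/12/13/... percent, which sums to exactly 100.
 */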
 * Ensure that all locks are released before first NVM or PHY access
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
     * These ones are more tricky since they are common to all ports; but
     * swfw_sync retries last long enough (1s) to be almost sure that if
     * lock can not be taken it is due to an improper lock of the
    mask = TXGBE_MNGSEM_SWPHY |
        TXGBE_MNGSEM_SWFLASH;
    if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
        PMD_DRV_LOG(DEBUG, "SWFW common locks released");
    hw->mac.release_swfw_sync(hw, mask);
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
    struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
    struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
    struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
    struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
    struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
    struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    const struct rte_memzone *mz;
    PMD_INIT_FUNC_TRACE();
    eth_dev->dev_ops = &txgbe_eth_dev_ops;
    eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
    eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
    eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
    eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
    eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
    eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
     * For secondary processes, we don't initialise any further as primary
     * has already done this work. Only check we don't need a different
     * RX and TX function.
    if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
        struct txgbe_tx_queue *txq;
        /* TX queue function in primary, set by last queue initialized
         * Tx queue may not be initialized by primary process
        if (eth_dev->data->tx_queues) {
            uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
            txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
            txgbe_set_tx_function(eth_dev, txq);
            /* Use default TX function if we get here */
            PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
                "Using default TX function.");
        txgbe_set_rx_function(eth_dev);
    rte_eth_copy_pci_info(eth_dev, pci_dev);
    /* Vendor and Device ID need to be set before init of shared code */
    hw->device_id = pci_dev->id.device_id;
    hw->vendor_id = pci_dev->id.vendor_id;
    hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
    hw->allow_unsupported_sfp = 1;
    /* Reserve memory for interrupt status block */
    mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
        16, TXGBE_ALIGN, SOCKET_ID_ANY);
    hw->isb_dma = TMZ_PADDR(mz);
    hw->isb_mem = TMZ_VADDR(mz);
    /* Initialize the shared code (base driver) */
    err = txgbe_init_shared_code(hw);
        PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
    /* Unlock any pending hardware semaphore */
    txgbe_swfw_lock_reset(hw);
    /* Initialize DCB configuration */
    memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
    txgbe_dcb_init(hw, dcb_config);
    /* Get Hardware Flow Control setting */
    hw->fc.requested_mode = txgbe_fc_full;
    hw->fc.current_mode = txgbe_fc_full;
    hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
    for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
        hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
        hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
    err = hw->rom.init_params(hw);
        PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
    /* Make sure we have a good EEPROM before we read from it */
    err = hw->rom.validate_checksum(hw, &csum);
        PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
    err = hw->mac.init_hw(hw);
     * Devices with copper phys will fail to initialise if txgbe_init_hw()
     * is called too soon after the kernel driver unbinding/binding occurs.
     * The failure occurs in txgbe_identify_phy() for all devices,
     * but for non-copper devices, txgbe_identify_sfp_module() is
     * also called. See txgbe_identify_phy(). The reason for the
     * failure is not known, and only occurs when virtualisation features
     * are disabled in the BIOS. A delay of 200ms was found to be enough by
     * trial-and-error, and is doubled to be safe.
    if (err && hw->phy.media_type == txgbe_media_type_copper) {
        err = hw->mac.init_hw(hw);
    if (err == TXGBE_ERR_SFP_NOT_PRESENT)
    if (err == TXGBE_ERR_EEPROM_VERSION) {
        PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
            "LOM. Please be aware there may be issues associated "
            "with your hardware.");
        PMD_INIT_LOG(ERR, "If you are experiencing problems "
            "please contact your hardware representative "
            "who provided you with this hardware.");
    } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
        PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
    /* Reset the hw statistics */
    txgbe_dev_stats_reset(eth_dev);
    /* disable interrupt */
    txgbe_disable_intr(hw);
    /* Allocate memory for storing MAC addresses */
    eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
        hw->mac.num_rar_entries, 0);
    if (eth_dev->data->mac_addrs == NULL) {
            "Failed to allocate %u bytes needed to store "
            RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
    /* Copy the permanent MAC address */
    rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
        &eth_dev->data->mac_addrs[0]);
    /* Allocate memory for storing hash filter MAC addresses */
    eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
        RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
    if (eth_dev->data->hash_mac_addrs == NULL) {
            "Failed to allocate %d bytes needed to store MAC addresses",
            RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
    /* initialize the vfta */
    memset(shadow_vfta, 0, sizeof(*shadow_vfta));
    /* initialize the hw strip bitmap */
    memset(hwstrip, 0, sizeof(*hwstrip));
    /* initialize PF if max_vfs not zero */
    ret = txgbe_pf_host_init(eth_dev);
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
        rte_free(eth_dev->data->hash_mac_addrs);
        eth_dev->data->hash_mac_addrs = NULL;
    ctrl_ext = rd32(hw, TXGBE_PORTCTL);
    /* let hardware know driver is loaded */
    ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
    /* Set PF Reset Done bit so PF/VF Mail Ops can work */
    ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
    wr32(hw, TXGBE_PORTCTL, ctrl_ext);
    if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
            (int)hw->mac.type, (int)hw->phy.type,
            (int)hw->phy.sfp_type);
        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
            (int)hw->mac.type, (int)hw->phy.type);
    PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
        eth_dev->data->port_id, pci_dev->id.vendor_id,
        pci_dev->id.device_id);
    rte_intr_callback_register(intr_handle,
        txgbe_dev_interrupt_handler, eth_dev);
    /* enable uio/vfio intr/eventfd mapping */
    rte_intr_enable(intr_handle);
    /* enable support intr */
    txgbe_enable_intr(eth_dev);
    /* initialize filter info */
    memset(filter_info, 0,
        sizeof(struct txgbe_filter_info));
    /* initialize 5tuple filter list */
    TAILQ_INIT(&filter_info->fivetuple_list);
    /* initialize flow director filter list & hash */
    txgbe_fdir_filter_init(eth_dev);
    /* initialize l2 tunnel filter list & hash */
    txgbe_l2_tn_filter_init(eth_dev);
    /* initialize flow filter lists */
    txgbe_filterlist_init();
    /* initialize bandwidth configuration info */
    memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
    PMD_INIT_FUNC_TRACE();
    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
    txgbe_dev_close(eth_dev);
static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
    struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
    struct txgbe_5tuple_filter *p_5tuple;
    while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
        TAILQ_REMOVE(&filter_info->fivetuple_list,
    memset(filter_info->fivetuple_mask, 0,
        sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
    struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
    struct txgbe_fdir_filter *fdir_filter;
    if (fdir_info->hash_map)
        rte_free(fdir_info->hash_map);
    if (fdir_info->hash_handle)
        rte_hash_free(fdir_info->hash_handle);
    while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
        TAILQ_REMOVE(&fdir_info->fdir_list,
        rte_free(fdir_filter);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
    struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
    struct txgbe_l2_tn_filter *l2_tn_filter;
    if (l2_tn_info->hash_map)
        rte_free(l2_tn_info->hash_map);
    if (l2_tn_info->hash_handle)
        rte_hash_free(l2_tn_info->hash_handle);
    while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
        TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
        rte_free(l2_tn_filter);
static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
    struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
    char fdir_hash_name[RTE_HASH_NAMESIZE];
    struct rte_hash_parameters fdir_hash_params = {
        .name = fdir_hash_name,
        .entries = TXGBE_MAX_FDIR_FILTER_NUM,
        .key_len = sizeof(struct txgbe_atr_input),
        .hash_func = rte_hash_crc,
        .hash_func_init_val = 0,
        .socket_id = rte_socket_id(),
    TAILQ_INIT(&fdir_info->fdir_list);
    snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
        "fdir_%s", TDEV_NAME(eth_dev));
    fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
    if (!fdir_info->hash_handle) {
        PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
    fdir_info->hash_map = rte_zmalloc("txgbe",
        sizeof(struct txgbe_fdir_filter *) *
        TXGBE_MAX_FDIR_FILTER_NUM,
    if (!fdir_info->hash_map) {
            "Failed to allocate memory for fdir hash map!");
    fdir_info->mask_added = FALSE;
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
    struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
    char l2_tn_hash_name[RTE_HASH_NAMESIZE];
    struct rte_hash_parameters l2_tn_hash_params = {
        .name = l2_tn_hash_name,
        .entries = TXGBE_MAX_L2_TN_FILTER_NUM,
        .key_len = sizeof(struct txgbe_l2_tn_key),
        .hash_func = rte_hash_crc,
        .hash_func_init_val = 0,
        .socket_id = rte_socket_id(),
    TAILQ_INIT(&l2_tn_info->l2_tn_list);
    snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
        "l2_tn_%s", TDEV_NAME(eth_dev));
    l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
    if (!l2_tn_info->hash_handle) {
        PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
    l2_tn_info->hash_map = rte_zmalloc("txgbe",
        sizeof(struct txgbe_l2_tn_filter *) *
        TXGBE_MAX_L2_TN_FILTER_NUM,
    if (!l2_tn_info->hash_map) {
            "Failed to allocate memory for L2 TN hash map!");
    l2_tn_info->e_tag_en = FALSE;
    l2_tn_info->e_tag_fwd_en = FALSE;
    l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
    struct rte_eth_dev *pf_ethdev;
    struct rte_eth_devargs eth_da;
    if (pci_dev->device.devargs) {
        retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
        memset(&eth_da, 0, sizeof(eth_da));
    retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
        sizeof(struct txgbe_adapter),
        eth_dev_pci_specific_init, pci_dev,
        eth_txgbe_dev_init, NULL);
    if (retval || eth_da.nb_representor_ports < 1)
    pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
    if (pf_ethdev == NULL)
static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
    struct rte_eth_dev *ethdev;
    ethdev = rte_eth_dev_allocated(pci_dev->device.name);
    return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
static struct rte_pci_driver rte_txgbe_pmd = {
    .id_table = pci_id_txgbe_map,
    .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
        RTE_PCI_DRV_INTR_LSC,
    .probe = eth_txgbe_pci_probe,
    .remove = eth_txgbe_pci_remove,
txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
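    /*
     * The 12-bit VLAN ID is split below: the upper seven bits
     * (vlan_id >> 5) select one of 128 32-bit VFTA words and the lower
     * five bits select a bit inside it, covering all 4096 VLAN IDs;
     * e.g. VLAN 100 maps to word 3, bit 4.
     */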
    vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
    vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
    vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
    wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
    /* update local VFTA copy */
    shadow_vfta->vfta[vid_idx] = vfta;
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    struct txgbe_rx_queue *rxq;
    uint32_t rxcfg, rxbal, rxbah;
        txgbe_vlan_hw_strip_enable(dev, queue);
        txgbe_vlan_hw_strip_disable(dev, queue);
    rxq = dev->data->rx_queues[queue];
    rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
    rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
    rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
    if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
        restart = (rxcfg & TXGBE_RXCFG_ENA) &&
            !(rxcfg & TXGBE_RXCFG_VLAN);
        rxcfg |= TXGBE_RXCFG_VLAN;
        restart = (rxcfg & TXGBE_RXCFG_ENA) &&
            (rxcfg & TXGBE_RXCFG_VLAN);
        rxcfg &= ~TXGBE_RXCFG_VLAN;
    rxcfg &= ~TXGBE_RXCFG_ENA;
    /* set vlan strip for ring */
    txgbe_dev_rx_queue_stop(dev, queue);
    wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
    wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
    wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
    txgbe_dev_rx_queue_start(dev, queue);
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
        enum rte_vlan_type vlan_type,
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    uint32_t portctrl, vlan_ext, qinq;
    portctrl = rd32(hw, TXGBE_PORTCTL);
    vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
    qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
    case ETH_VLAN_TYPE_INNER:
            wr32m(hw, TXGBE_VLANCTL,
                TXGBE_VLANCTL_TPID_MASK,
                TXGBE_VLANCTL_TPID(tpid));
            wr32m(hw, TXGBE_DMATXCTRL,
                TXGBE_DMATXCTRL_TPID_MASK,
                TXGBE_DMATXCTRL_TPID(tpid));
            PMD_DRV_LOG(ERR, "Inner type is not supported"
            wr32m(hw, TXGBE_TAGTPID(0),
                TXGBE_TAGTPID_LSB_MASK,
                TXGBE_TAGTPID_LSB(tpid));
    case ETH_VLAN_TYPE_OUTER:
            /* Only the high 16 bits are valid */
            wr32m(hw, TXGBE_EXTAG,
                TXGBE_EXTAG_VLAN_MASK,
                TXGBE_EXTAG_VLAN(tpid));
            wr32m(hw, TXGBE_VLANCTL,
                TXGBE_VLANCTL_TPID_MASK,
                TXGBE_VLANCTL_TPID(tpid));
            wr32m(hw, TXGBE_DMATXCTRL,
                TXGBE_DMATXCTRL_TPID_MASK,
                TXGBE_DMATXCTRL_TPID(tpid));
            wr32m(hw, TXGBE_TAGTPID(0),
                TXGBE_TAGTPID_MSB_MASK,
                TXGBE_TAGTPID_MSB(tpid));
        PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    PMD_INIT_FUNC_TRACE();
    /* Filter Table Disable */
    vlnctrl = rd32(hw, TXGBE_VLANCTL);
    vlnctrl &= ~TXGBE_VLANCTL_VFE;
    wr32(hw, TXGBE_VLANCTL, vlnctrl);
txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
    PMD_INIT_FUNC_TRACE();
    /* Filter Table Enable */
    vlnctrl = rd32(hw, TXGBE_VLANCTL);
    vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
    vlnctrl |= TXGBE_VLANCTL_VFE;
    wr32(hw, TXGBE_VLANCTL, vlnctrl);
    /* write whatever is in local vfta copy */
    for (i = 0; i < TXGBE_VFTA_SIZE; i++)
        wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
    struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
    struct txgbe_rx_queue *rxq;
    if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
        TXGBE_SET_HWSTRIP(hwstrip, queue);
        TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
    if (queue >= dev->data->nb_rx_queues)
    rxq = dev->data->rx_queues[queue];
        rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
        rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
        rxq->vlan_flags = PKT_RX_VLAN;
        rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    PMD_INIT_FUNC_TRACE();
    ctrl = rd32(hw, TXGBE_RXCFG(queue));
    ctrl &= ~TXGBE_RXCFG_VLAN;
    wr32(hw, TXGBE_RXCFG(queue), ctrl);
    /* record this setting for HW strip per queue */
    txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    PMD_INIT_FUNC_TRACE();
    ctrl = rd32(hw, TXGBE_RXCFG(queue));
    ctrl |= TXGBE_RXCFG_VLAN;
    wr32(hw, TXGBE_RXCFG(queue), ctrl);
    /* record this setting for HW strip per queue */
    txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    PMD_INIT_FUNC_TRACE();
    ctrl = rd32(hw, TXGBE_PORTCTL);
    ctrl &= ~TXGBE_PORTCTL_VLANEXT;
    ctrl &= ~TXGBE_PORTCTL_QINQ;
    wr32(hw, TXGBE_PORTCTL, ctrl);
txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
    struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
    PMD_INIT_FUNC_TRACE();
    ctrl = rd32(hw, TXGBE_PORTCTL);
    ctrl |= TXGBE_PORTCTL_VLANEXT;
    if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
            txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
        ctrl |= TXGBE_PORTCTL_QINQ;
    wr32(hw, TXGBE_PORTCTL, ctrl);
txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
    struct txgbe_rx_queue *rxq;
    PMD_INIT_FUNC_TRACE();
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        rxq = dev->data->rx_queues[i];
        if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
            txgbe_vlan_strip_queue_set(dev, i, 1);
            txgbe_vlan_strip_queue_set(dev, i, 0);
txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
    struct rte_eth_rxmode *rxmode;
    struct txgbe_rx_queue *rxq;
    if (mask & ETH_VLAN_STRIP_MASK) {
        rxmode = &dev->data->dev_conf.rxmode;
        if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
    struct rte_eth_rxmode *rxmode;
    rxmode = &dev->data->dev_conf.rxmode;
    if (mask & ETH_VLAN_STRIP_MASK)
        txgbe_vlan_hw_strip_config(dev);
    if (mask & ETH_VLAN_FILTER_MASK) {
        if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
            txgbe_vlan_hw_filter_enable(dev);
            txgbe_vlan_hw_filter_disable(dev);
    if (mask & ETH_VLAN_EXTEND_MASK) {
        if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
            txgbe_vlan_hw_extend_enable(dev);
            txgbe_vlan_hw_extend_disable(dev);
txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
    txgbe_config_vlan_strip_on_all_queues(dev, mask);
    txgbe_vlan_offload_config(dev, mask);
txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    /* VLNCTL: enable vlan filtering and allow all vlan tags through */
    uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
    vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
    wr32(hw, TXGBE_VLANCTL, vlanctrl);
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
        RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
    RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
        TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
    RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
        pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
txgbe_check_mq_mode(struct rte_eth_dev *dev)
    struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
    uint16_t nb_rx_q = dev->data->nb_rx_queues;
    uint16_t nb_tx_q = dev->data->nb_tx_queues;
    if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
        /* check multi-queue mode */
        switch (dev_conf->rxmode.mq_mode) {
        case ETH_MQ_RX_VMDQ_DCB:
            PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
        case ETH_MQ_RX_VMDQ_DCB_RSS:
            /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
            PMD_INIT_LOG(ERR, "SRIOV active,"
                " unsupported mq_mode rx %d.",
                dev_conf->rxmode.mq_mode);
        case ETH_MQ_RX_VMDQ_RSS:
            dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
            if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
                    PMD_INIT_LOG(ERR, "SRIOV is active,"
                        " invalid queue number"
                        " for VMDQ RSS, allowed"
                        " values are 1, 2 or 4.");
        case ETH_MQ_RX_VMDQ_ONLY:
        case ETH_MQ_RX_NONE:
            /* if no mq mode is configured, use the default scheme */
            dev->data->dev_conf.rxmode.mq_mode =
                ETH_MQ_RX_VMDQ_ONLY;
        default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
            /* SRIOV only works in VMDq enabled mode */
            PMD_INIT_LOG(ERR, "SRIOV is active,"
                " wrong mq_mode rx %d.",
                dev_conf->rxmode.mq_mode);
        switch (dev_conf->txmode.mq_mode) {
        case ETH_MQ_TX_VMDQ_DCB:
            PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
            dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
        default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
            dev->data->dev_conf.txmode.mq_mode =
                ETH_MQ_TX_VMDQ_ONLY;
        /* check valid queue number */
        if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
            PMD_INIT_LOG(ERR, "SRIOV is active,"
                " nb_rx_q=%d nb_tx_q=%d queue number"
                " must be less than or equal to %d.",
                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
        if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
            PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
        /* check configuration for vmdq+dcb mode */
        if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
            const struct rte_eth_vmdq_dcb_conf *conf;
            if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
                    TXGBE_VMDQ_DCB_NB_QUEUES);
            conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
            if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                    conf->nb_queue_pools == ETH_32_POOLS)) {
                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                    " nb_queue_pools must be %d or %d.",
                    ETH_16_POOLS, ETH_32_POOLS);
        if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
            const struct rte_eth_vmdq_dcb_tx_conf *conf;
            if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
                    TXGBE_VMDQ_DCB_NB_QUEUES);
            conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
            if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                    conf->nb_queue_pools == ETH_32_POOLS)) {
                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                    " nb_queue_pools != %d and"
                    " nb_queue_pools != %d.",
                    ETH_16_POOLS, ETH_32_POOLS);
        /* For DCB mode check our configuration before we go further */
        if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
            const struct rte_eth_dcb_rx_conf *conf;
            conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
            if (!(conf->nb_tcs == ETH_4_TCS ||
                    conf->nb_tcs == ETH_8_TCS)) {
                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                    " and nb_tcs != %d.",
                    ETH_4_TCS, ETH_8_TCS);
        if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
            const struct rte_eth_dcb_tx_conf *conf;
            conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
            if (!(conf->nb_tcs == ETH_4_TCS ||
                    conf->nb_tcs == ETH_8_TCS)) {
                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                    " and nb_tcs != %d.",
                    ETH_4_TCS, ETH_8_TCS);
txgbe_dev_configure(struct rte_eth_dev *dev)
    struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
    struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
    PMD_INIT_FUNC_TRACE();
    if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
        dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
    /* multiple queue mode checking */
    ret = txgbe_check_mq_mode(dev);
        PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
    /* set flag to update link status after init */
    intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
     * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
     * allocation Rx preconditions, we will reset it.
    adapter->rx_bulk_alloc_allowed = true;
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
    gpie = rd32(hw, TXGBE_GPIOINTEN);
    gpie |= TXGBE_GPIOBIT_6;
    wr32(hw, TXGBE_GPIOINTEN, gpie);
    intr->mask_misc |= TXGBE_ICRMISC_GPIO;
txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
        uint16_t tx_rate, uint64_t q_msk)
    struct txgbe_hw *hw;
    struct txgbe_vf_info *vfinfo;
    struct rte_eth_link link;
    uint8_t nb_q_per_pool;
    uint32_t queue_stride;
    uint32_t queue_idx, idx = 0, vf_idx;
    uint16_t total_rate = 0;
    struct rte_pci_device *pci_dev;
    pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
    if (vf >= pci_dev->max_vfs)
    if (tx_rate > link.link_speed)
    hw = TXGBE_DEV_HW(dev);
    vfinfo = *(TXGBE_DEV_VFDATA(dev));
    nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
    queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
    queue_idx = vf * queue_stride;
    queue_end = queue_idx + nb_q_per_pool - 1;
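    /*
     * Each VF owns one contiguous stride of queues; e.g. assuming
     * TXGBE_MAX_RX_QUEUE_NUM == 128 and 32 active pools, queue_stride
     * is 4 and VF 3 covers queues 12 .. 12 + nb_q_per_pool - 1.
     */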
    if (queue_end >= hw->mac.max_tx_queues)
    for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
        for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
            total_rate += vfinfo[vf_idx].tx_rate[idx];
    /* Store tx_rate for this vf. */
    for (idx = 0; idx < nb_q_per_pool; idx++) {
        if (((uint64_t)0x1 << idx) & q_msk) {
            if (vfinfo[vf].tx_rate[idx] != tx_rate)
                vfinfo[vf].tx_rate[idx] = tx_rate;
            total_rate += tx_rate;
    if (total_rate > dev->data->dev_link.link_speed) {
        /* Reset stored TX rate of the VF if it causes exceed
        memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
    /* Set ARBTXRATE of each queue/pool for vf X */
    for (; queue_idx <= queue_end; queue_idx++) {
            txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
 * Configure device link speed and setup link.
 * It returns 0 on success.
txgbe_dev_start(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
    struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    uint32_t intr_vector = 0;
    bool link_up = false, negotiate = 0;
    uint32_t allowed_speeds = 0;
    uint32_t *link_speeds;
    PMD_INIT_FUNC_TRACE();
    /* TXGBE devices don't support:
     *    - half duplex (checked afterwards for valid speeds)
     *    - fixed speed: TODO implement
    if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
            "Invalid link_speeds for port %u, fixed speed not supported",
            dev->data->port_id);
    /* Stop the link setup handler before resetting the HW. */
    rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
    /* disable uio/vfio intr/eventfd mapping */
    rte_intr_disable(intr_handle);
    hw->adapter_stopped = 0;
    /* reinitialize adapter
     * this calls reset and start
    hw->nb_rx_queues = dev->data->nb_rx_queues;
    hw->nb_tx_queues = dev->data->nb_tx_queues;
    status = txgbe_pf_reset_hw(hw);
    hw->mac.start_hw(hw);
    hw->mac.get_link_status = true;
    /* configure PF module if SRIOV enabled */
    txgbe_pf_host_configure(dev);
    txgbe_dev_phy_intr_setup(dev);
    /* check and configure queue intr-vector mapping */
    if ((rte_intr_cap_multiple(intr_handle) ||
            !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
        intr_vector = dev->data->nb_rx_queues;
        if (rte_intr_efd_enable(intr_handle, intr_vector))
    if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
        intr_handle->intr_vec =
            rte_zmalloc("intr_vec",
                dev->data->nb_rx_queues * sizeof(int), 0);
        if (intr_handle->intr_vec == NULL) {
            PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                " intr_vec", dev->data->nb_rx_queues);
    /* configure MSI-X for sleep until Rx interrupt */
    txgbe_configure_msix(dev);
    /* initialize transmission unit */
    txgbe_dev_tx_init(dev);
    /* This can fail when allocating mbufs for descriptor rings */
    err = txgbe_dev_rx_init(dev);
        PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
    mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
        ETH_VLAN_EXTEND_MASK;
    err = txgbe_vlan_offload_config(dev, mask);
        PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
    if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
        /* Enable vlan filtering for VMDq */
        txgbe_vmdq_vlan_hw_filter_enable(dev);
    /* Configure DCB hw */
    txgbe_configure_pb(dev);
    txgbe_configure_port(dev);
    txgbe_configure_dcb(dev);
    if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
        err = txgbe_fdir_configure(dev);
    /* Restore vf rate limit */
    if (vfinfo != NULL) {
        for (vf = 0; vf < pci_dev->max_vfs; vf++)
            for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
                if (vfinfo[vf].tx_rate[idx] != 0)
                    txgbe_set_vf_rate_limit(dev, vf,
                        vfinfo[vf].tx_rate[idx],
    err = txgbe_dev_rxtx_start(dev);
        PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
    /* Skip link setup if loopback mode is enabled. */
    if (hw->mac.type == txgbe_mac_raptor &&
            dev->data->dev_conf.lpbk_mode)
        goto skip_link_setup;
    if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
        err = hw->mac.setup_sfp(hw);
    if (hw->phy.media_type == txgbe_media_type_copper) {
        /* Turn on the copper */
        hw->phy.set_phy_power(hw, true);
        /* Turn on the laser */
        hw->mac.enable_tx_laser(hw);
    err = hw->mac.check_link(hw, &speed, &link_up, 0);
    dev->data->dev_link.link_status = link_up;
    err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
    allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
    link_speeds = &dev->data->dev_conf.link_speeds;
    if (*link_speeds & ~allowed_speeds) {
        PMD_INIT_LOG(ERR, "Invalid link setting");
    if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
        speed = (TXGBE_LINK_SPEED_100M_FULL |
            TXGBE_LINK_SPEED_1GB_FULL |
            TXGBE_LINK_SPEED_10GB_FULL);
        if (*link_speeds & ETH_LINK_SPEED_10G)
            speed |= TXGBE_LINK_SPEED_10GB_FULL;
        if (*link_speeds & ETH_LINK_SPEED_5G)
            speed |= TXGBE_LINK_SPEED_5GB_FULL;
        if (*link_speeds & ETH_LINK_SPEED_2_5G)
            speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
        if (*link_speeds & ETH_LINK_SPEED_1G)
            speed |= TXGBE_LINK_SPEED_1GB_FULL;
        if (*link_speeds & ETH_LINK_SPEED_100M)
            speed |= TXGBE_LINK_SPEED_100M_FULL;
    err = hw->mac.setup_link(hw, speed, link_up);
    if (rte_intr_allow_others(intr_handle)) {
        /* check if lsc interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.lsc != 0)
            txgbe_dev_lsc_interrupt_setup(dev, TRUE);
            txgbe_dev_lsc_interrupt_setup(dev, FALSE);
        txgbe_dev_macsec_interrupt_setup(dev);
        txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
        rte_intr_callback_unregister(intr_handle,
            txgbe_dev_interrupt_handler, dev);
        if (dev->data->dev_conf.intr_conf.lsc != 0)
            PMD_INIT_LOG(INFO, "lsc won't enable because of"
                " no intr multiplex");
    /* check if rxq interrupt is enabled */
    if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
        txgbe_dev_rxq_interrupt_setup(dev);
    /* enable uio/vfio intr/eventfd mapping */
    rte_intr_enable(intr_handle);
    /* resume enabled intr since hw reset */
    txgbe_enable_intr(dev);
    txgbe_l2_tunnel_conf(dev);
    txgbe_filter_restore(dev);
     * Update link status right before return, because it may
     * start link configuration process in a separate thread.
    txgbe_dev_link_update(dev, 0);
    wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
    txgbe_read_stats_registers(hw, hw_stats);
    hw->offset_loaded = 1;
    PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
    txgbe_dev_clear_queues(dev);
 * Stop device: disable rx and tx functions to allow for reconfiguring.
txgbe_dev_stop(struct rte_eth_dev *dev)
    struct rte_eth_link link;
    struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    if (hw->adapter_stopped)
    PMD_INIT_FUNC_TRACE();
    rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
    /* disable interrupts */
    txgbe_disable_intr(hw);
    txgbe_pf_reset_hw(hw);
    hw->adapter_stopped = 0;
    for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
        vfinfo[vf].clear_to_send = false;
    if (hw->phy.media_type == txgbe_media_type_copper) {
        /* Turn off the copper */
        hw->phy.set_phy_power(hw, false);
        /* Turn off the laser */
        hw->mac.disable_tx_laser(hw);
    txgbe_dev_clear_queues(dev);
    /* Clear stored conf */
    dev->data->scattered_rx = 0;
    /* Clear recorded link status */
    memset(&link, 0, sizeof(link));
    rte_eth_linkstatus_set(dev, &link);
    if (!rte_intr_allow_others(intr_handle))
        /* resume to the default handler */
        rte_intr_callback_register(intr_handle,
            txgbe_dev_interrupt_handler,
    /* Clean datapath event and queue/vec mapping */
    rte_intr_efd_disable(intr_handle);
    if (intr_handle->intr_vec != NULL) {
        rte_free(intr_handle->intr_vec);
        intr_handle->intr_vec = NULL;
    adapter->rss_reta_updated = 0;
    wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
    hw->adapter_stopped = true;
    dev->data->dev_started = 0;
 * Set device link up: enable tx.
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    if (hw->phy.media_type == txgbe_media_type_copper) {
        /* Turn on the copper */
        hw->phy.set_phy_power(hw, true);
        /* Turn on the laser */
        hw->mac.enable_tx_laser(hw);
        txgbe_dev_link_update(dev, 0);
 * Set device link down: disable tx.
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    if (hw->phy.media_type == txgbe_media_type_copper) {
        /* Turn off the copper */
        hw->phy.set_phy_power(hw, false);
        /* Turn off the laser */
        hw->mac.disable_tx_laser(hw);
        txgbe_dev_link_update(dev, 0);
 * Reset and stop device.
txgbe_dev_close(struct rte_eth_dev *dev)
    struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    PMD_INIT_FUNC_TRACE();
    txgbe_pf_reset_hw(hw);
    ret = txgbe_dev_stop(dev);
    txgbe_dev_free_queues(dev);
    /* reprogram the RAR[0] in case user changed it. */
    txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
    /* Unlock any pending hardware semaphore */
    txgbe_swfw_lock_reset(hw);
    /* disable uio intr before callback unregister */
    rte_intr_disable(intr_handle);
        ret = rte_intr_callback_unregister(intr_handle,
            txgbe_dev_interrupt_handler, dev);
        if (ret >= 0 || ret == -ENOENT) {
        } else if (ret != -EAGAIN) {
                "intr callback unregister failed: %d",
    } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
    /* cancel the delayed handler before removing the dev */
    rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
    /* uninitialize PF if max_vfs not zero */
    txgbe_pf_host_uninit(dev);
    rte_free(dev->data->mac_addrs);
    dev->data->mac_addrs = NULL;
    rte_free(dev->data->hash_mac_addrs);
    dev->data->hash_mac_addrs = NULL;
    /* remove all the fdir filters & hash */
    txgbe_fdir_filter_uninit(dev);
    /* remove all the L2 tunnel filters & hash */
    txgbe_l2_tn_filter_uninit(dev);
    /* Remove all ntuple filters of the device */
    txgbe_ntuple_filter_uninit(dev);
txgbe_dev_reset(struct rte_eth_dev *dev)
    /* When a DPDK PMD PF begins to reset a PF port, it should notify all
     * its VFs to make them align with it. The detailed notification
     * mechanism is PMD specific. As to txgbe PF, it is rather complex.
     * To avoid unexpected behavior in VF, currently reset of PF with
     * SR-IOV activation is not supported. It might be supported later.
    if (dev->data->sriov.active)
    ret = eth_txgbe_dev_uninit(dev);
    ret = eth_txgbe_dev_init(dev, NULL);
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
    uint32_t current_counter = rd32(hw, reg); \
    if (current_counter < last_counter) \
        current_counter += 0x100000000LL; \
    if (!hw->offset_loaded) \
        last_counter = current_counter; \
    counter = current_counter - last_counter; \
    counter &= 0xFFFFFFFFLL; \
#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
    uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
    uint64_t current_counter_msb = rd32(hw, reg_msb); \
    uint64_t current_counter = (current_counter_msb << 32) | \
        current_counter_lsb; \
    if (current_counter < last_counter) \
        current_counter += 0x1000000000LL; \
    if (!hw->offset_loaded) \
        last_counter = current_counter; \
    counter = current_counter - last_counter; \
    counter &= 0xFFFFFFFFFLL; \
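/*
 * The two macros above handle hardware counters that are narrower than
 * 64 bits: when the current reading is below the previous one the counter
 * has wrapped, so 2^32 (or 2^36 for the split LSB/MSB register pair) is
 * added back before taking the delta, and hw->offset_loaded suppresses
 * the delta on the first read after a stats reset.
 */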
2002 txgbe_read_stats_registers(struct txgbe_hw *hw,
2003 struct txgbe_hw_stats *hw_stats)
2008 for (i = 0; i < hw->nb_rx_queues; i++) {
2009 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
2010 hw->qp_last[i].rx_qp_packets,
2011 hw_stats->qp[i].rx_qp_packets);
2012 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
2013 hw->qp_last[i].rx_qp_bytes,
2014 hw_stats->qp[i].rx_qp_bytes);
2015 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
2016 hw->qp_last[i].rx_qp_mc_packets,
2017 hw_stats->qp[i].rx_qp_mc_packets);
2020 for (i = 0; i < hw->nb_tx_queues; i++) {
2021 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
2022 hw->qp_last[i].tx_qp_packets,
2023 hw_stats->qp[i].tx_qp_packets);
2024 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
2025 hw->qp_last[i].tx_qp_bytes,
2026 hw_stats->qp[i].tx_qp_bytes);
2029 for (i = 0; i < TXGBE_MAX_UP; i++) {
2030 hw_stats->up[i].rx_up_xon_packets +=
2031 rd32(hw, TXGBE_PBRXUPXON(i));
2032 hw_stats->up[i].rx_up_xoff_packets +=
2033 rd32(hw, TXGBE_PBRXUPXOFF(i));
2034 hw_stats->up[i].tx_up_xon_packets +=
2035 rd32(hw, TXGBE_PBTXUPXON(i));
2036 hw_stats->up[i].tx_up_xoff_packets +=
2037 rd32(hw, TXGBE_PBTXUPXOFF(i));
2038 hw_stats->up[i].tx_up_xon2off_packets +=
2039 rd32(hw, TXGBE_PBTXUPOFF(i));
2040 hw_stats->up[i].rx_up_dropped +=
2041 rd32(hw, TXGBE_PBRXMISS(i));
2043 hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
2044 hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
2045 hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
2046 hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
2049 hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
2050 hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
2052 hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
2053 hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
2054 hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
2057 hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
2058 hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
2059 hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
2061 hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
2062 hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
2063 hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
2065 hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
2066 hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
2068 hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
2069 hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
2070 hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
2071 hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
2072 hw_stats->rx_size_512_to_1023_packets +=
2073 rd64(hw, TXGBE_MACRX512TO1023L);
2074 hw_stats->rx_size_1024_to_max_packets +=
2075 rd64(hw, TXGBE_MACRX1024TOMAXL);
2076 hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
2077 hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
2078 hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
2079 hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
2080 hw_stats->tx_size_512_to_1023_packets +=
2081 rd64(hw, TXGBE_MACTX512TO1023L);
2082 hw_stats->tx_size_1024_to_max_packets +=
2083 rd64(hw, TXGBE_MACTX1024TOMAXL);
2085 hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
2086 hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
2087 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
2090 hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
2091 hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
2092 hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
2093 hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
2096 hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
2097 hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
2098 hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
2099 hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
2100 hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
2101 hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
2102 hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
2104 /* Flow Director Stats */
2105 hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
2106 hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
2107 hw_stats->flow_director_added_filters +=
2108 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
2109 hw_stats->flow_director_removed_filters +=
2110 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
2111 hw_stats->flow_director_filter_add_errors +=
2112 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
2113 hw_stats->flow_director_filter_remove_errors +=
2114 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
2117 hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
2118 hw_stats->tx_macsec_pkts_encrypted +=
2119 rd32(hw, TXGBE_LSECTX_ENCPKT);
2120 hw_stats->tx_macsec_pkts_protected +=
2121 rd32(hw, TXGBE_LSECTX_PROTPKT);
2122 hw_stats->tx_macsec_octets_encrypted +=
2123 rd32(hw, TXGBE_LSECTX_ENCOCT);
2124 hw_stats->tx_macsec_octets_protected +=
2125 rd32(hw, TXGBE_LSECTX_PROTOCT);
2126 hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
2127 hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
2128 hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
2129 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
2130 hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
2131 hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
2132 hw_stats->rx_macsec_sc_pkts_unchecked +=
2133 rd32(hw, TXGBE_LSECRX_UNCHKPKT);
2134 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
2135 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
2136 for (i = 0; i < 2; i++) {
2137 hw_stats->rx_macsec_sa_pkts_ok +=
2138 rd32(hw, TXGBE_LSECRX_OKPKT(i));
2139 hw_stats->rx_macsec_sa_pkts_invalid +=
2140 rd32(hw, TXGBE_LSECRX_INVPKT(i));
2141 hw_stats->rx_macsec_sa_pkts_notvalid +=
2142 rd32(hw, TXGBE_LSECRX_BADPKT(i));
2144 hw_stats->rx_macsec_sa_pkts_unusedsa +=
2145 rd32(hw, TXGBE_LSECRX_INVSAPKT);
2146 hw_stats->rx_macsec_sa_pkts_notusingsa +=
2147 rd32(hw, TXGBE_LSECRX_BADSAPKT);
2149 hw_stats->rx_total_missed_packets = 0;
2150 for (i = 0; i < TXGBE_MAX_UP; i++) {
2151 hw_stats->rx_total_missed_packets +=
2152 hw_stats->up[i].rx_up_dropped;
2157 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2159 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2160 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2161 struct txgbe_stat_mappings *stat_mappings =
2162 TXGBE_DEV_STAT_MAPPINGS(dev);
2165 txgbe_read_stats_registers(hw, hw_stats);
2170 /* Fill out the rte_eth_stats statistics structure */
2171 stats->ipackets = hw_stats->rx_packets;
2172 stats->ibytes = hw_stats->rx_bytes;
2173 stats->opackets = hw_stats->tx_packets;
2174 stats->obytes = hw_stats->tx_bytes;
2176 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2177 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2178 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2179 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2180 memset(&stats->q_errors, 0, sizeof(stats->q_errors));
2181 for (i = 0; i < TXGBE_MAX_QP; i++) {
2182 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2183 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2186 q_map = (stat_mappings->rqsm[n] >> offset)
2187 & QMAP_FIELD_RESERVED_BITS_MASK;
2188 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2189 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2190 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2191 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2193 q_map = (stat_mappings->tqsm[n] >> offset)
2194 & QMAP_FIELD_RESERVED_BITS_MASK;
2195 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2196 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2197 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2198 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2202 stats->imissed = hw_stats->rx_total_missed_packets;
2203 stats->ierrors = hw_stats->rx_crc_errors +
2204 hw_stats->rx_mac_short_packet_dropped +
2205 hw_stats->rx_length_errors +
2206 hw_stats->rx_undersize_errors +
2207 hw_stats->rx_oversize_errors +
2208 hw_stats->rx_drop_packets +
2209 hw_stats->rx_illegal_byte_errors +
2210 hw_stats->rx_error_bytes +
2211 hw_stats->rx_fragment_errors +
2212 hw_stats->rx_fcoe_crc_errors +
2213 hw_stats->rx_fcoe_mbuf_allocation_errors;
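/*
 * Editor's note: a minimal sketch of the queue-to-counter folding used in
 * txgbe_dev_stats_get() above. Queues whose stat-map value is at or above
 * RTE_ETHDEV_QUEUE_STAT_CNTRS wrap modulo the counter array size, so several
 * hardware queue pairs can accumulate into one rte_eth_stats slot. The
 * helper name is hypothetical, for illustration only.
 */
static inline unsigned int __rte_unused
txgbe_example_stat_slot(uint32_t q_map)
{
	/* identical to the fold applied to the rqsm/tqsm values above */
	return q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS;
}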
2221 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2223 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2224 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2226 /* HW registers are cleared on read */
2227 hw->offset_loaded = 0;
2228 txgbe_dev_stats_get(dev, NULL);
2229 hw->offset_loaded = 1;
2231 /* Reset software totals */
2232 memset(hw_stats, 0, sizeof(*hw_stats));
2237 /* This function calculates the number of xstats based on the current config */
2239 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2241 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2242 return TXGBE_NB_HW_STATS +
2243 TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2244 TXGBE_NB_QP_STATS * nb_queues;
2248 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2252 /* Extended stats from txgbe_hw_stats */
2253 if (id < TXGBE_NB_HW_STATS) {
2254 snprintf(name, size, "[hw]%s",
2255 rte_txgbe_stats_strings[id].name);
2258 id -= TXGBE_NB_HW_STATS;
2260 /* Priority Stats */
2261 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2262 nb = id / TXGBE_NB_UP_STATS;
2263 st = id % TXGBE_NB_UP_STATS;
2264 snprintf(name, size, "[p%u]%s", nb,
2265 rte_txgbe_up_strings[st].name);
2268 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2271 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2272 nb = id / TXGBE_NB_QP_STATS;
2273 st = id % TXGBE_NB_QP_STATS;
2274 snprintf(name, size, "[q%u]%s", nb,
2275 rte_txgbe_qp_strings[st].name);
2278 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2280 return -(int)(id + 1);
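/*
 * Editor's note: illustrative (hypothetical) consumer of the id-to-name
 * decoding above, assuming the usual zero-on-success convention for
 * txgbe_get_name_by_id(). Ids run through three consecutive blocks:
 * hardware stats, then per-user-priority stats, then per-queue stats; any
 * id past the last block yields the negative value returned above.
 */
static void __rte_unused
txgbe_example_dump_xstat_names(struct rte_eth_dev *dev)
{
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int i, n = txgbe_xstats_calc_num(dev);

	for (i = 0; i < n; i++)
		if (txgbe_get_name_by_id(i, name, sizeof(name)) == 0)
			PMD_DRV_LOG(DEBUG, "xstat[%u] = %s", i, name);
}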
2284 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2288 /* Extended stats from txgbe_hw_stats */
2289 if (id < TXGBE_NB_HW_STATS) {
2290 *offset = rte_txgbe_stats_strings[id].offset;
2293 id -= TXGBE_NB_HW_STATS;
2295 /* Priority Stats */
2296 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2297 nb = id / TXGBE_NB_UP_STATS;
2298 st = id % TXGBE_NB_UP_STATS;
2299 *offset = rte_txgbe_up_strings[st].offset +
2300 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2303 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2306 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2307 nb = id / TXGBE_NB_QP_STATS;
2308 st = id % TXGBE_NB_QP_STATS;
2309 *offset = rte_txgbe_qp_strings[st].offset +
2310 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2317 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2318 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2320 unsigned int i, count;
2322 count = txgbe_xstats_calc_num(dev);
2323 if (xstats_names == NULL)
2324 return count;
2326 /* Note: limit >= cnt_stats checked upstream
2327 * in rte_eth_xstats_names()
2328 */
2329 limit = min(limit, count);
2331 /* Extended stats from txgbe_hw_stats */
2332 for (i = 0; i < limit; i++) {
2333 if (txgbe_get_name_by_id(i, xstats_names[i].name,
2334 sizeof(xstats_names[i].name))) {
2335 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2343 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2344 struct rte_eth_xstat_name *xstats_names,
2345 const uint64_t *ids,
2351 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2353 for (i = 0; i < limit; i++) {
2354 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2355 sizeof(xstats_names[i].name))) {
2356 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2365 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2368 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2369 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2370 unsigned int i, count;
2372 txgbe_read_stats_registers(hw, hw_stats);
2374 /* If this is a reset request, xstats is NULL and we have already
2375 * cleared the registers by reading them.
2376 */
2377 count = txgbe_xstats_calc_num(dev);
2381 limit = min(limit, txgbe_xstats_calc_num(dev));
2383 /* Extended stats from txgbe_hw_stats */
2384 for (i = 0; i < limit; i++) {
2385 uint32_t offset = 0;
2387 if (txgbe_get_offset_by_id(i, &offset)) {
2388 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2391 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2399 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2402 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2403 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2404 unsigned int i, count;
2406 txgbe_read_stats_registers(hw, hw_stats);
2408 /* If this is a reset request, xstats is NULL and we have already
2409 * cleared the registers by reading them.
2410 */
2411 count = txgbe_xstats_calc_num(dev);
2415 limit = min(limit, txgbe_xstats_calc_num(dev));
2417 /* Extended stats from txgbe_hw_stats */
2418 for (i = 0; i < limit; i++) {
2421 if (txgbe_get_offset_by_id(i, &offset)) {
2422 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2425 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2432 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2433 uint64_t *values, unsigned int limit)
2435 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2439 return txgbe_dev_xstats_get_(dev, values, limit);
2441 for (i = 0; i < limit; i++) {
2444 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2445 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2448 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2455 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2457 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2458 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2460 /* HW registers are cleared on read */
2461 hw->offset_loaded = 0;
2462 txgbe_read_stats_registers(hw, hw_stats);
2463 hw->offset_loaded = 1;
2465 /* Reset software totals */
2466 memset(hw_stats, 0, sizeof(*hw_stats));
2472 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2474 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2475 u16 eeprom_verh, eeprom_verl;
2479 hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
2480 hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
2482 etrack_id = (eeprom_verh << 16) | eeprom_verl;
2483 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2485 ret += 1; /* add the size of '\0' */
2486 if (fw_size < (u32)ret)
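	/*
	 * Editor's note: worked example of the version formatting above.
	 * With eeprom_verh == 0x1234 and eeprom_verl == 0x5678,
	 * etrack_id == 0x12345678 and the string written is "0x12345678";
	 * snprintf() returns 10, so ret becomes 11 with the '\0'. Any
	 * fw_size below 11 presumably makes the function report the
	 * required length instead of success, per the common
	 * fw_version_get contract.
	 */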
2493 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2495 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2496 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2498 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2499 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2500 dev_info->min_rx_bufsize = 1024;
2501 dev_info->max_rx_pktlen = 15872;
2502 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2503 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2504 dev_info->max_vfs = pci_dev->max_vfs;
2505 dev_info->max_vmdq_pools = ETH_64_POOLS;
2506 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2507 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2508 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2509 dev_info->rx_queue_offload_capa);
2510 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2511 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2513 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2515 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2516 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2517 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2519 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2524 dev_info->default_txconf = (struct rte_eth_txconf) {
2526 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2527 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2528 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2530 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2534 dev_info->rx_desc_lim = rx_desc_lim;
2535 dev_info->tx_desc_lim = tx_desc_lim;
2537 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2538 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2539 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2541 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2542 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2544 /* Driver-preferred Rx/Tx parameters */
2545 dev_info->default_rxportconf.burst_size = 32;
2546 dev_info->default_txportconf.burst_size = 32;
2547 dev_info->default_rxportconf.nb_queues = 1;
2548 dev_info->default_txportconf.nb_queues = 1;
2549 dev_info->default_rxportconf.ring_size = 256;
2550 dev_info->default_txportconf.ring_size = 256;
2556 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2558 if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2559 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2560 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2561 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2562 return txgbe_get_supported_ptypes();
2568 txgbe_dev_setup_link_alarm_handler(void *param)
2570 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2571 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2572 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2574 bool autoneg = false;
2576 speed = hw->phy.autoneg_advertised;
2577 if (!speed)
2578 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2580 hw->mac.setup_link(hw, speed, true);
2582 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2585 /* return 0 means link status changed, -1 means not changed */
2587 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2588 int wait_to_complete)
2590 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2591 struct rte_eth_link link;
2592 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2593 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2598 memset(&link, 0, sizeof(link));
2599 link.link_status = ETH_LINK_DOWN;
2600 link.link_speed = ETH_SPEED_NUM_NONE;
2601 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2602 link.link_autoneg = ETH_LINK_AUTONEG;
2604 hw->mac.get_link_status = true;
2606 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2607 return rte_eth_linkstatus_set(dev, &link);
2609 /* check if it needs to wait to complete, if lsc interrupt is enabled */
2610 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2611 wait = 0;
2613 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2615 if (err != 0) {
2616 link.link_speed = ETH_SPEED_NUM_100M;
2617 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2618 return rte_eth_linkstatus_set(dev, &link);
2619 }
2621 if (link_up == 0) {
2622 if (hw->phy.media_type == txgbe_media_type_fiber) {
2623 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2624 rte_eal_alarm_set(10,
2625 txgbe_dev_setup_link_alarm_handler, dev);
2627 return rte_eth_linkstatus_set(dev, &link);
2630 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2631 link.link_status = ETH_LINK_UP;
2632 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2634 switch (link_speed) {
2635 default:
2636 case TXGBE_LINK_SPEED_UNKNOWN:
2637 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2638 link.link_speed = ETH_SPEED_NUM_100M;
2639 break;
2641 case TXGBE_LINK_SPEED_100M_FULL:
2642 link.link_speed = ETH_SPEED_NUM_100M;
2643 break;
2645 case TXGBE_LINK_SPEED_1GB_FULL:
2646 link.link_speed = ETH_SPEED_NUM_1G;
2647 break;
2649 case TXGBE_LINK_SPEED_2_5GB_FULL:
2650 link.link_speed = ETH_SPEED_NUM_2_5G;
2651 break;
2653 case TXGBE_LINK_SPEED_5GB_FULL:
2654 link.link_speed = ETH_SPEED_NUM_5G;
2655 break;
2657 case TXGBE_LINK_SPEED_10GB_FULL:
2658 link.link_speed = ETH_SPEED_NUM_10G;
2659 break;
2660 }
2662 return rte_eth_linkstatus_set(dev, &link);
2666 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2668 return txgbe_dev_link_update_share(dev, wait_to_complete);
2672 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2674 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2677 fctrl = rd32(hw, TXGBE_PSRCTL);
2678 fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2679 wr32(hw, TXGBE_PSRCTL, fctrl);
2685 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2687 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2690 fctrl = rd32(hw, TXGBE_PSRCTL);
2691 fctrl &= (~TXGBE_PSRCTL_UCP);
2692 if (dev->data->all_multicast == 1)
2693 fctrl |= TXGBE_PSRCTL_MCP;
2694 else
2695 fctrl &= (~TXGBE_PSRCTL_MCP);
2696 wr32(hw, TXGBE_PSRCTL, fctrl);
2702 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2704 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2707 fctrl = rd32(hw, TXGBE_PSRCTL);
2708 fctrl |= TXGBE_PSRCTL_MCP;
2709 wr32(hw, TXGBE_PSRCTL, fctrl);
2715 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2717 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2720 if (dev->data->promiscuous == 1)
2721 return 0; /* must remain in all_multicast mode */
2723 fctrl = rd32(hw, TXGBE_PSRCTL);
2724 fctrl &= (~TXGBE_PSRCTL_MCP);
2725 wr32(hw, TXGBE_PSRCTL, fctrl);
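/*
 * Editor's note: the four filter-mode helpers above all read-modify-write
 * TXGBE_PSRCTL. Summarising the bit behaviour as implemented: promiscuous
 * enable sets both UCP and MCP; promiscuous disable clears UCP and leaves
 * MCP set only while all_multicast is on; and allmulticast disable is a
 * no-op while promiscuous mode is active, so multicast promiscuity
 * survives until promiscuous mode itself is turned off.
 */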
2730 /*
2731 * It clears the interrupt causes and enables the interrupt.
2732 * It will be called once only during NIC initialization.
2733 *
2734 * @param dev
2735 * Pointer to struct rte_eth_dev.
2736 * @param on
2737 * Enable or Disable.
2738 *
2739 * @return
2740 * - On success, zero.
2741 * - On failure, a negative value.
2742 */
2744 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2746 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2748 txgbe_dev_link_status_print(dev);
2749 if (on)
2750 intr->mask_misc |= TXGBE_ICRMISC_LSC;
2751 else
2752 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2757 /*
2758 * It clears the interrupt causes and enables the interrupt.
2759 * It will be called once only during NIC initialization.
2760 *
2761 * @param dev
2762 * Pointer to struct rte_eth_dev.
2763 *
2764 * @return
2765 * - On success, zero.
2766 * - On failure, a negative value.
2767 */
2769 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2771 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2773 intr->mask[0] |= TXGBE_ICR_MASK;
2774 intr->mask[1] |= TXGBE_ICR_MASK;
2779 /*
2780 * It clears the interrupt causes and enables the interrupt.
2781 * It will be called once only during NIC initialization.
2782 *
2783 * @param dev
2784 * Pointer to struct rte_eth_dev.
2785 *
2786 * @return
2787 * - On success, zero.
2788 * - On failure, a negative value.
2789 */
2791 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2793 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2795 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2800 /*
2801 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
2802 *
2803 * @param dev
2804 * Pointer to struct rte_eth_dev.
2805 *
2806 * @return
2807 * - On success, zero.
2808 * - On failure, a negative value.
2809 */
2811 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2814 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2815 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2817 /* clear all cause mask */
2818 txgbe_disable_intr(hw);
2820 /* read-on-clear nic registers here */
2821 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2822 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2826 /* set flag for async link update */
2827 if (eicr & TXGBE_ICRMISC_LSC)
2828 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2830 if (eicr & TXGBE_ICRMISC_VFMBX)
2831 intr->flags |= TXGBE_FLAG_MAILBOX;
2833 if (eicr & TXGBE_ICRMISC_LNKSEC)
2834 intr->flags |= TXGBE_FLAG_MACSEC;
2836 if (eicr & TXGBE_ICRMISC_GPIO)
2837 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2842 /*
2843 * It gets and then prints the link status.
2844 *
2845 * @param dev
2846 * Pointer to struct rte_eth_dev.
2847 *
2848 * @return
2849 * - On success, zero.
2850 * - On failure, a negative value.
2851 */
2853 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2855 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2856 struct rte_eth_link link;
2858 rte_eth_linkstatus_get(dev, &link);
2860 if (link.link_status) {
2861 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2862 (int)(dev->data->port_id),
2863 (unsigned int)link.link_speed,
2864 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2865 "full-duplex" : "half-duplex");
2867 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2868 (int)(dev->data->port_id));
2870 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2871 pci_dev->addr.domain,
2873 pci_dev->addr.devid,
2874 pci_dev->addr.function);
2877 /*
2878 * It executes link_update after knowing an interrupt occurred.
2879 *
2880 * @param dev
2881 * Pointer to struct rte_eth_dev.
2882 *
2883 * @return
2884 * - On success, zero.
2885 * - On failure, a negative value.
2886 */
2888 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2889 struct rte_intr_handle *intr_handle)
2891 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2893 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2895 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2897 if (intr->flags & TXGBE_FLAG_MAILBOX) {
2898 txgbe_pf_mbx_process(dev);
2899 intr->flags &= ~TXGBE_FLAG_MAILBOX;
2902 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2903 hw->phy.handle_lasi(hw);
2904 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2907 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2908 struct rte_eth_link link;
2910 /* get the link status before the link update, for later prediction */
2911 rte_eth_linkstatus_get(dev, &link);
2913 txgbe_dev_link_update(dev, 0);
2915 /* likely to up */
2916 if (!link.link_status)
2917 /* handle it 1 sec later, wait for it to be stable */
2918 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2919 /* likely to down */
2920 else
2921 /* handle it 4 sec later, wait for it to be stable */
2922 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2924 txgbe_dev_link_status_print(dev);
2925 if (rte_eal_alarm_set(timeout * 1000,
2926 txgbe_dev_interrupt_delayed_handler,
2927 (void *)dev) < 0) {
2928 PMD_DRV_LOG(ERR, "Error setting alarm");
2929 } else {
2930 /* remember original mask */
2931 intr->mask_misc_orig = intr->mask_misc;
2932 /* only disable lsc interrupt */
2933 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2937 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2938 txgbe_enable_intr(dev);
2939 rte_intr_enable(intr_handle);
2944 /*
2945 * Interrupt handler which shall be registered as an alarm callback for
2946 * delayed handling of a specific interrupt, waiting for the NIC state
2947 * to become stable. As the interrupt state is not stable for txgbe right
2948 * after the link goes down, it needs to wait 4 seconds to read a stable status.
2949 *
2950 * @param handle
2951 * Pointer to interrupt handle.
2952 * @param param
2953 * The address of parameter (struct rte_eth_dev *) registered before.
2954 */
2959 txgbe_dev_interrupt_delayed_handler(void *param)
2961 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2962 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2963 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2964 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2965 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2968 txgbe_disable_intr(hw);
2970 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2971 if (eicr & TXGBE_ICRMISC_VFMBX)
2972 txgbe_pf_mbx_process(dev);
2974 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2975 hw->phy.handle_lasi(hw);
2976 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2979 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2980 txgbe_dev_link_update(dev, 0);
2981 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
2982 txgbe_dev_link_status_print(dev);
2983 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2984 NULL);
2987 if (intr->flags & TXGBE_FLAG_MACSEC) {
2988 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2989 NULL);
2990 intr->flags &= ~TXGBE_FLAG_MACSEC;
2993 /* restore original mask */
2994 intr->mask_misc = intr->mask_misc_orig;
2995 intr->mask_misc_orig = 0;
2997 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2998 txgbe_enable_intr(dev);
2999 rte_intr_enable(intr_handle);
3002 /*
3003 * Interrupt handler triggered by NIC for handling
3004 * specific interrupt.
3005 *
3006 * @param handle
3007 * Pointer to interrupt handle.
3008 * @param param
3009 * The address of parameter (struct rte_eth_dev *) registered before.
3010 */
3015 txgbe_dev_interrupt_handler(void *param)
3017 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3019 txgbe_dev_interrupt_get_status(dev);
3020 txgbe_dev_interrupt_action(dev, dev->intr_handle);
3024 txgbe_dev_led_on(struct rte_eth_dev *dev)
3026 struct txgbe_hw *hw;
3028 hw = TXGBE_DEV_HW(dev);
3029 return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
3033 txgbe_dev_led_off(struct rte_eth_dev *dev)
3035 struct txgbe_hw *hw;
3037 hw = TXGBE_DEV_HW(dev);
3038 return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
3042 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3044 struct txgbe_hw *hw;
3050 hw = TXGBE_DEV_HW(dev);
3052 fc_conf->pause_time = hw->fc.pause_time;
3053 fc_conf->high_water = hw->fc.high_water[0];
3054 fc_conf->low_water = hw->fc.low_water[0];
3055 fc_conf->send_xon = hw->fc.send_xon;
3056 fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3058 /*
3059 * Return rx_pause status according to the actual setting of
3060 * the RXFCCFG register.
3061 */
3062 mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
3063 if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
3064 rx_pause = 1;
3065 else
3066 rx_pause = 0;
3068 /*
3069 * Return tx_pause status according to the actual setting of
3070 * the TXFCCFG register.
3071 */
3072 fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
3073 if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
3074 tx_pause = 1;
3075 else
3076 tx_pause = 0;
3078 if (rx_pause && tx_pause)
3079 fc_conf->mode = RTE_FC_FULL;
3080 else if (rx_pause)
3081 fc_conf->mode = RTE_FC_RX_PAUSE;
3082 else if (tx_pause)
3083 fc_conf->mode = RTE_FC_TX_PAUSE;
3084 else
3085 fc_conf->mode = RTE_FC_NONE;
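/*
 * Editor's note: the rx_pause/tx_pause evaluation above reduces to a small
 * truth table:
 *   rx_pause && tx_pause -> RTE_FC_FULL
 *   rx_pause only        -> RTE_FC_RX_PAUSE
 *   tx_pause only        -> RTE_FC_TX_PAUSE
 *   neither              -> RTE_FC_NONE
 */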
3091 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3093 struct txgbe_hw *hw;
3095 uint32_t rx_buf_size;
3096 uint32_t max_high_water;
3097 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3098 txgbe_fc_none,
3099 txgbe_fc_rx_pause,
3100 txgbe_fc_tx_pause,
3101 txgbe_fc_full
3102 };
3104 PMD_INIT_FUNC_TRACE();
3106 hw = TXGBE_DEV_HW(dev);
3107 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
3108 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3110 /*
3111 * At least reserve one Ethernet frame for the watermark:
3112 * high_water/low_water are in kilobytes for txgbe.
3113 */
3114 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3115 if (fc_conf->high_water > max_high_water ||
3116 fc_conf->high_water < fc_conf->low_water) {
3117 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3118 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3122 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
3123 hw->fc.pause_time = fc_conf->pause_time;
3124 hw->fc.high_water[0] = fc_conf->high_water;
3125 hw->fc.low_water[0] = fc_conf->low_water;
3126 hw->fc.send_xon = fc_conf->send_xon;
3127 hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3129 err = txgbe_fc_enable(hw);
3131 /* Not negotiated is not an error case */
3132 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
3133 wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
3134 (fc_conf->mac_ctrl_frame_fwd
3135 ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
3141 PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
3146 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3147 struct rte_eth_pfc_conf *pfc_conf)
3150 uint32_t rx_buf_size;
3151 uint32_t max_high_water;
3153 uint8_t map[TXGBE_DCB_UP_MAX] = { 0 };
3154 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3155 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3157 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3158 txgbe_fc_none,
3159 txgbe_fc_rx_pause,
3160 txgbe_fc_tx_pause,
3161 txgbe_fc_full
3162 };
3164 PMD_INIT_FUNC_TRACE();
3166 txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3167 tc_num = map[pfc_conf->priority];
3168 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3169 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3170 /*
3171 * At least reserve one Ethernet frame for the watermark:
3172 * high_water/low_water are in kilobytes for txgbe.
3173 */
3174 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3175 if (pfc_conf->fc.high_water > max_high_water ||
3176 pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3177 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3178 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3182 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3183 hw->fc.pause_time = pfc_conf->fc.pause_time;
3184 hw->fc.send_xon = pfc_conf->fc.send_xon;
3185 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
3186 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3188 err = txgbe_dcb_pfc_enable(hw, tc_num);
3190 /* Not negotiated is not an error case */
3191 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3194 PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3199 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3200 struct rte_eth_rss_reta_entry64 *reta_conf,
3205 uint16_t idx, shift;
3206 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3207 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3209 PMD_INIT_FUNC_TRACE();
3211 if (!txgbe_rss_update_sp(hw->mac.type)) {
3212 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3217 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3218 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3219 "(%d) doesn't match the number hardware can supported "
3220 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3224 for (i = 0; i < reta_size; i += 4) {
3225 idx = i / RTE_RETA_GROUP_SIZE;
3226 shift = i % RTE_RETA_GROUP_SIZE;
3227 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3231 reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3232 for (j = 0; j < 4; j++) {
3233 if (RS8(mask, j, 0x1)) {
3234 reta &= ~(MS32(8 * j, 0xFF));
3235 reta |= LS32(reta_conf[idx].reta[shift + j],
3239 wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3241 adapter->rss_reta_updated = 1;
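/*
 * Editor's note: sketch of the RETA packing handled in the loop above.
 * Each 32-bit TXGBE_REG_RSSTBL word holds four 8-bit entries, so table
 * index i lives in word i >> 2, byte lane i & 3. A hypothetical helper
 * equivalent to the MS32()/LS32() manipulation:
 */
static inline uint32_t __rte_unused
txgbe_example_reta_pack(uint32_t reg, unsigned int lane, uint8_t entry)
{
	reg &= ~(0xFFu << (8 * lane));		/* clear the byte lane */
	reg |= (uint32_t)entry << (8 * lane);	/* insert the new entry */
	return reg;
}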
3247 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3248 struct rte_eth_rss_reta_entry64 *reta_conf,
3251 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3254 uint16_t idx, shift;
3256 PMD_INIT_FUNC_TRACE();
3258 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3259 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3260 "(%d) doesn't match the number hardware can supported "
3261 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3265 for (i = 0; i < reta_size; i += 4) {
3266 idx = i / RTE_RETA_GROUP_SIZE;
3267 shift = i % RTE_RETA_GROUP_SIZE;
3268 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3272 reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3273 for (j = 0; j < 4; j++) {
3274 if (RS8(mask, j, 0x1))
3275 reta_conf[idx].reta[shift + j] =
3276 (uint16_t)RS32(reta, 8 * j, 0xFF);
3284 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3285 uint32_t index, uint32_t pool)
3287 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3288 uint32_t enable_addr = 1;
3290 return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3295 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3297 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3299 txgbe_clear_rar(hw, index);
3303 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3305 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3307 txgbe_remove_rar(dev, 0);
3308 txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3314 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3316 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3317 struct rte_eth_dev_info dev_info;
3318 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3319 struct rte_eth_dev_data *dev_data = dev->data;
3322 ret = txgbe_dev_info_get(dev, &dev_info);
3326 /* check that mtu is within the allowed range */
3327 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
3330 /* If device is started, refuse mtu that requires the support of
3331 * scattered packets when this feature has not been enabled before.
3333 if (dev_data->dev_started && !dev_data->scattered_rx &&
3334 (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3335 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3336 PMD_INIT_LOG(ERR, "Stop port first.");
3340 /* update max frame size */
3341 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3343 if (hw->mode)
3344 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3345 TXGBE_FRAME_SIZE_MAX);
3346 else
3347 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3348 TXGBE_FRMSZ_MAX(frame_size));
3354 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3356 uint32_t vector = 0;
3358 switch (hw->mac.mc_filter_type) {
3359 case 0: /* use bits [47:36] of the address */
3360 vector = ((uc_addr->addr_bytes[4] >> 4) |
3361 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3362 break;
3363 case 1: /* use bits [46:35] of the address */
3364 vector = ((uc_addr->addr_bytes[4] >> 3) |
3365 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3366 break;
3367 case 2: /* use bits [45:34] of the address */
3368 vector = ((uc_addr->addr_bytes[4] >> 2) |
3369 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3370 break;
3371 case 3: /* use bits [43:32] of the address */
3372 vector = ((uc_addr->addr_bytes[4]) |
3373 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3374 break;
3375 default: /* Invalid mc_filter_type */
3376 break;
3377 }
3379 /* vector can only be 12 bits wide or the boundary will be exceeded */
3380 vector &= 0xFFF;
3381 return vector;
3382 }
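/*
 * Editor's note: worked example of the hash above for mc_filter_type == 0
 * (bits [47:36] of the address). For a MAC address ending in ...:12:34,
 * addr_bytes[4] == 0x12 and addr_bytes[5] == 0x34, so
 * vector = (0x12 >> 4) | (0x34 << 4) = 0x341; the caller below then
 * selects UTA word 0x341 >> 5 = 0x1A and bit 0x341 & 0x1F = 1.
 */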
3385 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3386 struct rte_ether_addr *mac_addr, uint8_t on)
3394 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3395 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3397 /* The UTA table only exists on PF hardware */
3398 if (hw->mac.type < txgbe_mac_raptor)
3399 return -ENOTSUP;
3401 vector = txgbe_uta_vector(hw, mac_addr);
3402 uta_idx = (vector >> 5) & 0x7F;
3403 uta_mask = 0x1UL << (vector & 0x1F);
3405 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3406 return 0;
3408 reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3409 if (on) {
3410 uta_info->uta_in_use++;
3411 reg_val |= uta_mask;
3412 uta_info->uta_shadow[uta_idx] |= uta_mask;
3413 } else {
3414 uta_info->uta_in_use--;
3415 reg_val &= ~uta_mask;
3416 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3417 }
3419 wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3421 psrctl = rd32(hw, TXGBE_PSRCTL);
3422 if (uta_info->uta_in_use > 0)
3423 psrctl |= TXGBE_PSRCTL_UCHFENA;
3424 else
3425 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3427 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3428 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3429 wr32(hw, TXGBE_PSRCTL, psrctl);
3435 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3437 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3438 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3442 /* The UTA table only exists on PF hardware */
3443 if (hw->mac.type < txgbe_mac_raptor)
3444 return -ENOTSUP;
3446 if (on) {
3447 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3448 uta_info->uta_shadow[i] = ~0;
3449 wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3450 }
3451 } else {
3452 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3453 uta_info->uta_shadow[i] = 0;
3454 wr32(hw, TXGBE_UCADDRTBL(i), 0);
3455 }
3456 }
3458 psrctl = rd32(hw, TXGBE_PSRCTL);
3459 if (on)
3460 psrctl |= TXGBE_PSRCTL_UCHFENA;
3461 else
3462 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3464 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3465 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3466 wr32(hw, TXGBE_PSRCTL, psrctl);
3472 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3474 uint32_t new_val = orig_val;
3476 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3477 new_val |= TXGBE_POOLETHCTL_UTA;
3478 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3479 new_val |= TXGBE_POOLETHCTL_MCHA;
3480 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3481 new_val |= TXGBE_POOLETHCTL_UCHA;
3482 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3483 new_val |= TXGBE_POOLETHCTL_BCA;
3484 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3485 new_val |= TXGBE_POOLETHCTL_MCP;
3491 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3493 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3494 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3496 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3498 if (queue_id < 32) {
3499 mask = rd32(hw, TXGBE_IMS(0));
3500 mask &= (1 << queue_id);
3501 wr32(hw, TXGBE_IMS(0), mask);
3502 } else if (queue_id < 64) {
3503 mask = rd32(hw, TXGBE_IMS(1));
3504 mask &= (1 << (queue_id - 32));
3505 wr32(hw, TXGBE_IMS(1), mask);
3507 rte_intr_enable(intr_handle);
3513 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3516 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3518 if (queue_id < 32) {
3519 mask = rd32(hw, TXGBE_IMS(0));
3520 mask &= ~(1 << queue_id);
3521 wr32(hw, TXGBE_IMS(0), mask);
3522 } else if (queue_id < 64) {
3523 mask = rd32(hw, TXGBE_IMS(1));
3524 mask &= ~(1 << (queue_id - 32));
3525 wr32(hw, TXGBE_IMS(1), mask);
3531 /**
3532 * set the IVAR registers, mapping interrupt causes to vectors
3533 * @param hw
3534 * pointer to txgbe_hw struct
3535 * @param direction
3536 * 0 for Rx, 1 for Tx, -1 for other causes
3537 * @param queue
3538 * queue to map the corresponding interrupt to
3539 * @param msix_vector
3540 * the vector to map to the corresponding queue
3541 */
3543 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3544 uint8_t queue, uint8_t msix_vector)
3548 if (direction == -1) {
3549 /* other causes */
3550 msix_vector |= TXGBE_IVARMISC_VLD;
3551 idx = 0;
3552 tmp = rd32(hw, TXGBE_IVARMISC);
3553 tmp &= ~(0xFF << idx);
3554 tmp |= (msix_vector << idx);
3555 wr32(hw, TXGBE_IVARMISC, tmp);
3556 } else {
3557 /* rx or tx causes */
3558 /* Workaround for ICR lost */
3559 idx = ((16 * (queue & 1)) + (8 * direction));
3560 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3561 tmp &= ~(0xFF << idx);
3562 tmp |= (msix_vector << idx);
3563 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
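/*
 * Editor's note: worked example of the IVAR indexing above. Each 32-bit
 * TXGBE_IVAR register carries four 8-bit vector fields covering two
 * queues. For Rx (direction 0) on queue 5:
 * idx = 16 * (5 & 1) + 8 * 0 = 16, and the vector field lands in bits
 * [23:16] of TXGBE_IVAR(5 >> 1), i.e. TXGBE_IVAR(2).
 */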
3567 /**
3568 * Sets up the hardware to properly generate MSI-X interrupts
3569 * @param dev
3570 * board private structure
3571 */
3573 txgbe_configure_msix(struct rte_eth_dev *dev)
3575 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3576 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3577 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3578 uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3579 uint32_t vec = TXGBE_MISC_VEC_ID;
3582 /* won't configure the MSI-X register if no mapping is done
3583 * between intr vector and event fd,
3584 * but if MSI-X has been enabled already, we need to configure
3585 * auto clean, auto mask and throttling.
3586 */
3587 gpie = rd32(hw, TXGBE_GPIE);
3588 if (!rte_intr_dp_is_en(intr_handle) &&
3589 !(gpie & TXGBE_GPIE_MSIX))
3590 return;
3592 if (rte_intr_allow_others(intr_handle)) {
3593 base = TXGBE_RX_VEC_START;
3594 vec = base;
3595 }
3597 /* setup GPIE for MSI-x mode */
3598 gpie = rd32(hw, TXGBE_GPIE);
3599 gpie |= TXGBE_GPIE_MSIX;
3600 wr32(hw, TXGBE_GPIE, gpie);
3602 /* Populate the IVAR table and set the ITR values to the
3603 * corresponding register.
3605 if (rte_intr_dp_is_en(intr_handle)) {
3606 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3607 queue_id++) {
3608 /* by default, 1:1 mapping */
3609 txgbe_set_ivar_map(hw, 0, queue_id, vec);
3610 intr_handle->intr_vec[queue_id] = vec;
3611 if (vec < base + intr_handle->nb_efd - 1)
3612 vec++;
3613 }
3614 }
3615 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3617 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3618 TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3623 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3624 uint16_t queue_idx, uint16_t tx_rate)
3626 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3629 if (queue_idx >= hw->mac.max_tx_queues)
3633 bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3634 bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3640 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3641 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3643 wr32(hw, TXGBE_ARBTXMMW, 0x14);
3645 /* Set ARBTXRATE of queue X */
3646 wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3647 wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3654 txgbe_syn_filter_set(struct rte_eth_dev *dev,
3655 struct rte_eth_syn_filter *filter,
3658 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3659 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3663 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3666 syn_info = filter_info->syn_info;
3668 if (add) {
3669 if (syn_info & TXGBE_SYNCLS_ENA)
3670 return -EINVAL;
3671 synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3672 synqf |= TXGBE_SYNCLS_ENA;
3674 if (filter->hig_pri)
3675 synqf |= TXGBE_SYNCLS_HIPRIO;
3676 else
3677 synqf &= ~TXGBE_SYNCLS_HIPRIO;
3678 } else {
3679 synqf = rd32(hw, TXGBE_SYNCLS);
3680 if (!(syn_info & TXGBE_SYNCLS_ENA))
3681 return -ENOENT;
3682 synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3683 }
3685 filter_info->syn_info = synqf;
3686 wr32(hw, TXGBE_SYNCLS, synqf);
3691 static inline enum txgbe_5tuple_protocol
3692 convert_protocol_type(uint8_t protocol_value)
3694 if (protocol_value == IPPROTO_TCP)
3695 return TXGBE_5TF_PROT_TCP;
3696 else if (protocol_value == IPPROTO_UDP)
3697 return TXGBE_5TF_PROT_UDP;
3698 else if (protocol_value == IPPROTO_SCTP)
3699 return TXGBE_5TF_PROT_SCTP;
3701 return TXGBE_5TF_PROT_NONE;
3704 /* inject a 5-tuple filter to HW */
3706 txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
3707 struct txgbe_5tuple_filter *filter)
3709 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3711 uint32_t ftqf, sdpqf;
3712 uint32_t l34timir = 0;
3713 uint32_t mask = TXGBE_5TFCTL0_MASK;
3716 sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
3717 sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
3719 ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
3720 ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
3721 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
3722 mask &= ~TXGBE_5TFCTL0_MSADDR;
3723 if (filter->filter_info.dst_ip_mask == 0)
3724 mask &= ~TXGBE_5TFCTL0_MDADDR;
3725 if (filter->filter_info.src_port_mask == 0)
3726 mask &= ~TXGBE_5TFCTL0_MSPORT;
3727 if (filter->filter_info.dst_port_mask == 0)
3728 mask &= ~TXGBE_5TFCTL0_MDPORT;
3729 if (filter->filter_info.proto_mask == 0)
3730 mask &= ~TXGBE_5TFCTL0_MPROTO;
3732 ftqf |= TXGBE_5TFCTL0_MPOOL;
3733 ftqf |= TXGBE_5TFCTL0_ENA;
3735 wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
3736 wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
3737 wr32(hw, TXGBE_5TFPORT(i), sdpqf);
3738 wr32(hw, TXGBE_5TFCTL0(i), ftqf);
3740 l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
3741 wr32(hw, TXGBE_5TFCTL1(i), l34timir);
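/*
 * Editor's note: mask semantics of the 5-tuple filter programmed above.
 * TXGBE_5TFCTL0_MASK starts with all five ignore bits set; an internal
 * *_mask of 0 means "compare this field", so the code clears the matching
 * M* bit in TXGBE_5TFCTL0 to make that field participate in the match.
 */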
3744 /*
3745 * add a 5tuple filter
3746 *
3747 * @param
3748 * dev: Pointer to struct rte_eth_dev.
3749 * index: the index the filter allocates.
3750 * filter: pointer to the filter that will be added.
3751 * rx_queue: the queue id the filter is assigned to.
3752 *
3753 * @return
3754 * - On success, zero.
3755 * - On failure, a negative value.
3756 */
3758 txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3759 struct txgbe_5tuple_filter *filter)
3761 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3764 /*
3765 * look for an unused 5tuple filter index,
3766 * and insert the filter into the list.
3767 */
3768 for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
3769 idx = i / (sizeof(uint32_t) * NBBY);
3770 shift = i % (sizeof(uint32_t) * NBBY);
3771 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3772 filter_info->fivetuple_mask[idx] |= 1 << shift;
3774 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3780 if (i >= TXGBE_MAX_FTQF_FILTERS) {
3781 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3785 txgbe_inject_5tuple_filter(dev, filter);
3790 /*
3791 * remove a 5tuple filter
3792 *
3793 * @param
3794 * dev: Pointer to struct rte_eth_dev.
3795 * filter: pointer to the filter that will be removed.
3796 */
3798 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
3799 struct txgbe_5tuple_filter *filter)
3801 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3802 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3803 uint16_t index = filter->index;
3805 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
3806 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
3807 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3810 wr32(hw, TXGBE_5TFDADDR(index), 0);
3811 wr32(hw, TXGBE_5TFSADDR(index), 0);
3812 wr32(hw, TXGBE_5TFPORT(index), 0);
3813 wr32(hw, TXGBE_5TFCTL0(index), 0);
3814 wr32(hw, TXGBE_5TFCTL1(index), 0);
3817 static inline struct txgbe_5tuple_filter *
3818 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
3819 struct txgbe_5tuple_filter_info *key)
3821 struct txgbe_5tuple_filter *it;
3823 TAILQ_FOREACH(it, filter_list, entries) {
3824 if (memcmp(key, &it->filter_info,
3825 sizeof(struct txgbe_5tuple_filter_info)) == 0) {
3832 /* translate elements in struct rte_eth_ntuple_filter
3833 * to struct txgbe_5tuple_filter_info
3836 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
3837 struct txgbe_5tuple_filter_info *filter_info)
3839 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
3840 filter->priority > TXGBE_5TUPLE_MAX_PRI ||
3841 filter->priority < TXGBE_5TUPLE_MIN_PRI)
3844 switch (filter->dst_ip_mask) {
3845 case UINT32_MAX:
3846 filter_info->dst_ip_mask = 0;
3847 filter_info->dst_ip = filter->dst_ip;
3848 break;
3849 case 0:
3850 filter_info->dst_ip_mask = 1;
3851 break;
3852 default:
3853 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3854 return -EINVAL;
3855 }
3857 switch (filter->src_ip_mask) {
3858 case UINT32_MAX:
3859 filter_info->src_ip_mask = 0;
3860 filter_info->src_ip = filter->src_ip;
3861 break;
3862 case 0:
3863 filter_info->src_ip_mask = 1;
3864 break;
3865 default:
3866 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3867 return -EINVAL;
3868 }
3870 switch (filter->dst_port_mask) {
3871 case UINT16_MAX:
3872 filter_info->dst_port_mask = 0;
3873 filter_info->dst_port = filter->dst_port;
3874 break;
3875 case 0:
3876 filter_info->dst_port_mask = 1;
3877 break;
3878 default:
3879 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3880 return -EINVAL;
3881 }
3883 switch (filter->src_port_mask) {
3884 case UINT16_MAX:
3885 filter_info->src_port_mask = 0;
3886 filter_info->src_port = filter->src_port;
3887 break;
3888 case 0:
3889 filter_info->src_port_mask = 1;
3890 break;
3891 default:
3892 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3893 return -EINVAL;
3894 }
3896 switch (filter->proto_mask) {
3897 case UINT8_MAX:
3898 filter_info->proto_mask = 0;
3899 filter_info->proto =
3900 convert_protocol_type(filter->proto);
3901 break;
3902 case 0:
3903 filter_info->proto_mask = 1;
3904 break;
3905 default:
3906 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3907 return -EINVAL;
3908 }
3910 filter_info->priority = (uint8_t)filter->priority;
3914 /*
3915 * add or delete an ntuple filter
3916 *
3917 * @param
3918 * dev: Pointer to struct rte_eth_dev.
3919 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3920 * add: if true, add the filter; if false, remove the filter
3921 *
3922 * @return
3923 * - On success, zero.
3924 * - On failure, a negative value.
3925 */
3927 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
3928 struct rte_eth_ntuple_filter *ntuple_filter,
3931 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3932 struct txgbe_5tuple_filter_info filter_5tuple;
3933 struct txgbe_5tuple_filter *filter;
3936 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
3937 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
3941 memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
3942 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
3946 filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
3948 if (filter != NULL && add) {
3949 PMD_DRV_LOG(ERR, "filter exists.");
3950 return -EEXIST;
3951 }
3952 if (filter == NULL && !add) {
3953 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3954 return -ENOENT;
3955 }
3957 if (add) {
3958 filter = rte_zmalloc("txgbe_5tuple_filter",
3959 sizeof(struct txgbe_5tuple_filter), 0);
3960 if (filter == NULL)
3961 return -ENOMEM;
3962 rte_memcpy(&filter->filter_info,
3963 &filter_5tuple,
3964 sizeof(struct txgbe_5tuple_filter_info));
3965 filter->queue = ntuple_filter->queue;
3966 ret = txgbe_add_5tuple_filter(dev, filter);
3967 if (ret < 0) {
3968 rte_free(filter);
3969 return ret;
3970 }
3971 } else {
3972 txgbe_remove_5tuple_filter(dev, filter);
3973 }
3979 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
3980 struct rte_eth_ethertype_filter *filter,
3983 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3984 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3988 struct txgbe_ethertype_filter ethertype_filter;
3990 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3993 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
3994 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
3995 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
3996 " ethertype filter.", filter->ether_type);
4000 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4001 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4004 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4005 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4009 ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4010 if (ret >= 0 && add) {
4011 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4012 filter->ether_type);
4013 return -EEXIST;
4014 }
4015 if (ret < 0 && !add) {
4016 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4017 filter->ether_type);
4018 return -ENOENT;
4019 }
4021 if (add) {
4022 etqf = TXGBE_ETFLT_ENA;
4023 etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
4024 etqs |= TXGBE_ETCLS_QPID(filter->queue);
4025 etqs |= TXGBE_ETCLS_QENA;
4027 ethertype_filter.ethertype = filter->ether_type;
4028 ethertype_filter.etqf = etqf;
4029 ethertype_filter.etqs = etqs;
4030 ethertype_filter.conf = FALSE;
4031 ret = txgbe_ethertype_filter_insert(filter_info,
4032 &ethertype_filter);
4033 if (ret < 0) {
4034 PMD_DRV_LOG(ERR, "ethertype filters are full.");
4035 return -ENOSPC;
4036 }
4037 } else {
4038 ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4039 if (ret < 0)
4040 return -ENOSYS;
4041 }
4042 wr32(hw, TXGBE_ETFLT(ret), etqf);
4043 wr32(hw, TXGBE_ETCLS(ret), etqs);
4050 txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
4051 enum rte_filter_type filter_type,
4052 enum rte_filter_op filter_op,
4057 switch (filter_type) {
4058 case RTE_ETH_FILTER_GENERIC:
4059 if (filter_op != RTE_ETH_FILTER_GET)
4061 *(const void **)arg = &txgbe_flow_ops;
4064 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4074 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4075 u8 **mc_addr_ptr, u32 *vmdq)
4080 mc_addr = *mc_addr_ptr;
4081 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4086 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4087 struct rte_ether_addr *mc_addr_set,
4088 uint32_t nb_mc_addr)
4090 struct txgbe_hw *hw;
4093 hw = TXGBE_DEV_HW(dev);
4094 mc_addr_list = (u8 *)mc_addr_set;
4095 return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4096 txgbe_dev_addr_list_itr, TRUE);
4100 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4102 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4103 uint64_t systime_cycles;
4105 systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4106 systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4108 return systime_cycles;
4112 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4114 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4115 uint64_t rx_tstamp_cycles;
4117 /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
4118 rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4119 rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4121 return rx_tstamp_cycles;
4125 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4127 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4128 uint64_t tx_tstamp_cycles;
4130 /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
4131 tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4132 tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4134 return tx_tstamp_cycles;
4138 txgbe_start_timecounters(struct rte_eth_dev *dev)
4140 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4141 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4142 struct rte_eth_link link;
4143 uint32_t incval = 0;
4146 /* Get current link speed. */
4147 txgbe_dev_link_update(dev, 1);
4148 rte_eth_linkstatus_get(dev, &link);
4150 switch (link.link_speed) {
4151 case ETH_SPEED_NUM_100M:
4152 incval = TXGBE_INCVAL_100;
4153 shift = TXGBE_INCVAL_SHIFT_100;
4154 break;
4155 case ETH_SPEED_NUM_1G:
4156 incval = TXGBE_INCVAL_1GB;
4157 shift = TXGBE_INCVAL_SHIFT_1GB;
4158 break;
4159 case ETH_SPEED_NUM_10G:
4160 default:
4161 incval = TXGBE_INCVAL_10GB;
4162 shift = TXGBE_INCVAL_SHIFT_10GB;
4163 break;
4164 }
4166 wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
4168 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4169 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4170 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4172 adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4173 adapter->systime_tc.cc_shift = shift;
4174 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4176 adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4177 adapter->rx_tstamp_tc.cc_shift = shift;
4178 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4180 adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4181 adapter->tx_tstamp_tc.cc_shift = shift;
4182 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
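/*
 * Editor's note: sketch of the cyclecounter setup above. The SYSTIME
 * increment value and shift are link-speed dependent (e.g. the
 * TXGBE_INCVAL_10GB / TXGBE_INCVAL_SHIFT_10GB pair at 10G, per the
 * switch above), and nsec_mask = (1ULL << shift) - 1 keeps the sub-tick
 * remainder when rte_timecounter_update() converts cycle counts to
 * nanoseconds.
 */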
4186 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4188 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4190 adapter->systime_tc.nsec += delta;
4191 adapter->rx_tstamp_tc.nsec += delta;
4192 adapter->tx_tstamp_tc.nsec += delta;
4198 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4201 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4203 ns = rte_timespec_to_ns(ts);
4204 /* Set the timecounters to a new value. */
4205 adapter->systime_tc.nsec = ns;
4206 adapter->rx_tstamp_tc.nsec = ns;
4207 adapter->tx_tstamp_tc.nsec = ns;
4213 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4215 uint64_t ns, systime_cycles;
4216 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4218 systime_cycles = txgbe_read_systime_cyclecounter(dev);
4219 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4220 *ts = rte_ns_to_timespec(ns);
4226 txgbe_timesync_enable(struct rte_eth_dev *dev)
4228 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4231 /* Stop the timesync system time. */
4232 wr32(hw, TXGBE_TSTIMEINC, 0x0);
4233 /* Reset the timesync system time value. */
4234 wr32(hw, TXGBE_TSTIMEL, 0x0);
4235 wr32(hw, TXGBE_TSTIMEH, 0x0);
4237 txgbe_start_timecounters(dev);
4239 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4240 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4241 RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4243 /* Enable timestamping of received PTP packets. */
4244 tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4245 tsync_ctl |= TXGBE_TSRXCTL_ENA;
4246 wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4248 /* Enable timestamping of transmitted PTP packets. */
4249 tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4250 tsync_ctl |= TXGBE_TSTXCTL_ENA;
4251 wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4259 txgbe_timesync_disable(struct rte_eth_dev *dev)
4261 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4264 /* Disable timestamping of transmitted PTP packets. */
4265 tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4266 tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
4267 wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4269 /* Disable timestamping of received PTP packets. */
4270 tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4271 tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
4272 wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4274 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4275 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
4277 /* Stop incrementing the System Time registers. */
4278 wr32(hw, TXGBE_TSTIMEINC, 0);
4284 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4285 struct timespec *timestamp,
4286 uint32_t flags __rte_unused)
4288 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4289 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4290 uint32_t tsync_rxctl;
4291 uint64_t rx_tstamp_cycles;
4294 tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4295 if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4298 rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4299 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4300 *timestamp = rte_ns_to_timespec(ns);
4306 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4307 struct timespec *timestamp)
4309 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4310 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4311 uint32_t tsync_txctl;
4312 uint64_t tx_tstamp_cycles;
4315 tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4316 if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4319 tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4320 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4321 *timestamp = rte_ns_to_timespec(ns);
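/*
 * Editor's note: a typical call sequence for the IEEE 1588 helpers above
 * (sketch, error handling elided; based on the VLD checks, the read
 * functions are assumed to fail until a timestamp is latched):
 *
 *	struct timespec ts;
 *
 *	txgbe_timesync_enable(dev);
 *	... send or receive a PTP frame ...
 *	while (txgbe_timesync_read_tx_timestamp(dev, &ts) != 0)
 *		rte_delay_us(10);
 */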
4327 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4328 {
4329 int count = 0;
4330 int g_ind = 0;
4331 const struct reg_info *reg_group;
4332 const struct reg_info **reg_set = txgbe_regs_others;
4334 while ((reg_group = reg_set[g_ind++]))
4335 count += txgbe_regs_group_count(reg_group);
4341 txgbe_get_regs(struct rte_eth_dev *dev,
4342 struct rte_dev_reg_info *regs)
4344 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4345 uint32_t *data = regs->data;
4348 const struct reg_info *reg_group;
4349 const struct reg_info **reg_set = txgbe_regs_others;
4352 regs->length = txgbe_get_reg_length(dev);
4353 regs->width = sizeof(uint32_t);
4357 /* Support only full register dump */
4358 if (regs->length == 0 ||
4359 regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
4360 regs->version = hw->mac.type << 24 |
4361 hw->revision_id << 16 |
4363 while ((reg_group = reg_set[g_ind++]))
4364 count += txgbe_read_regs_group(dev, &data[count],
4373 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
4375 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4377 /* Return unit is byte count */
4378 return hw->rom.word_size * 2;
4382 txgbe_get_eeprom(struct rte_eth_dev *dev,
4383 struct rte_dev_eeprom_info *in_eeprom)
4385 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4386 struct txgbe_rom_info *eeprom = &hw->rom;
4387 uint16_t *data = in_eeprom->data;
4390 first = in_eeprom->offset >> 1;
4391 length = in_eeprom->length >> 1;
4392 if (first > hw->rom.word_size ||
4393 ((first + length) > hw->rom.word_size))
4396 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4398 return eeprom->readw_buffer(hw, first, length, data);
4402 txgbe_set_eeprom(struct rte_eth_dev *dev,
4403 struct rte_dev_eeprom_info *in_eeprom)
4405 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4406 struct txgbe_rom_info *eeprom = &hw->rom;
4407 uint16_t *data = in_eeprom->data;
4410 first = in_eeprom->offset >> 1;
4411 length = in_eeprom->length >> 1;
4412 if (first > hw->rom.word_size ||
4413 ((first + length) > hw->rom.word_size))
4416 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4418 return eeprom->writew_buffer(hw, first, length, data);
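/*
 * Editor's note: both EEPROM accessors above work in 16-bit words; the
 * byte offset and length are halved (>> 1), so a request with offset 4
 * and length 8 touches words 2..5, and ranges beyond hw->rom.word_size
 * words are rejected.
 */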
4422 txgbe_get_module_info(struct rte_eth_dev *dev,
4423 struct rte_eth_dev_module_info *modinfo)
4425 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4427 uint8_t sff8472_rev, addr_mode;
4428 bool page_swap = false;
4430 /* Check whether we support SFF-8472 or not */
4431 status = hw->phy.read_i2c_eeprom(hw,
4432 TXGBE_SFF_SFF_8472_COMP,
4437 /* addressing mode is not supported */
4438 status = hw->phy.read_i2c_eeprom(hw,
4439 TXGBE_SFF_SFF_8472_SWAP,
4444 if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
4446 "Address change required to access page 0xA2, "
4447 "but not supported. Please report the module "
4448 "type to the driver maintainers.");
4452 if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
4453 /* We have a SFP, but it does not support SFF-8472 */
4454 modinfo->type = RTE_ETH_MODULE_SFF_8079;
4455 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
4457 /* We have a SFP which supports a revision of SFF-8472. */
4458 modinfo->type = RTE_ETH_MODULE_SFF_8472;
4459 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
4466 txgbe_get_module_eeprom(struct rte_eth_dev *dev,
4467 struct rte_dev_eeprom_info *info)
4469 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4470 uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
4471 uint8_t databyte = 0xFF;
4472 uint8_t *data = info->data;
4475 if (info->length == 0)
4478 for (i = info->offset; i < info->offset + info->length; i++) {
4479 if (i < RTE_ETH_MODULE_SFF_8079_LEN)
4480 status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
4482 status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
4487 data[i - info->offset] = databyte;
4494 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
4495 {
4496 switch (mac_type) {
4497 case txgbe_mac_raptor:
4498 return 1;
4499 default:
4500 return 0;
4501 }
4502 }
static int
txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
			struct rte_eth_dcb_info *dcb_info)
{
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
	struct txgbe_dcb_tc_config *tc;
	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
	uint8_t nb_tcs;
	uint8_t i, j;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;

	tc_queue = &dcb_info->tc_queue;
	nb_tcs = dcb_info->nb_tcs;

	if (dcb_config->vt_mode) { /* vt is enabled */
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
			for (j = 0; j < nb_tcs; j++) {
				tc_queue->tc_rxq[0][j].base = j;
				tc_queue->tc_rxq[0][j].nb_queue = 1;
				tc_queue->tc_txq[0][j].base = j;
				tc_queue->tc_txq[0][j].nb_queue = 1;
			}
		} else {
			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
				for (j = 0; j < nb_tcs; j++) {
					tc_queue->tc_rxq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_rxq[i][j].nb_queue = 1;
					tc_queue->tc_txq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_txq[i][j].nb_queue = 1;
				}
			}
		}
	} else { /* vt is disabled */
		struct rte_eth_dcb_rx_conf *rx_conf =
				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
		if (dcb_info->nb_tcs == ETH_4_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 64;
			dcb_info->tc_queue.tc_txq[0][2].base = 96;
			dcb_info->tc_queue.tc_txq[0][3].base = 112;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 32;
			dcb_info->tc_queue.tc_txq[0][2].base = 64;
			dcb_info->tc_queue.tc_txq[0][3].base = 80;
			dcb_info->tc_queue.tc_txq[0][4].base = 96;
			dcb_info->tc_queue.tc_txq[0][5].base = 104;
			dcb_info->tc_queue.tc_txq[0][6].base = 112;
			dcb_info->tc_queue.tc_txq[0][7].base = 120;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
		}
	}
	for (i = 0; i < dcb_info->nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
	}

	return 0;
}
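/*
 * Usage sketch (illustrative): the TC-to-queue mapping computed above can
 * be queried from an application; port_id is an assumption.
 *
 *	struct rte_eth_dcb_info dcb_info;
 *	int i;
 *
 *	if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0)
 *		for (i = 0; i < dcb_info.nb_tcs; i++)
 *			printf("TC%d: rxq base %d, %d queue(s), bw %d%%\n",
 *			       i, dcb_info.tc_queue.tc_rxq[0][i].base,
 *			       dcb_info.tc_queue.tc_rxq[0][i].nb_queue,
 *			       dcb_info.tc_bws[i]);
 */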
/* Update e-tag ether type */
static int
txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
			    uint16_t ether_type)
{
	uint32_t etag_etype;

	/* Read-modify-write so only the ether type field changes */
	etag_etype = rd32(hw, TXGBE_EXTAG);
	etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
	etag_etype |= ether_type;
	wr32(hw, TXGBE_EXTAG, etag_etype);
	txgbe_flush(hw);

	return 0;
}
/* Enable e-tag tunnel */
static int
txgbe_e_tag_enable(struct txgbe_hw *hw)
{
	uint32_t etag_etype;

	etag_etype = rd32(hw, TXGBE_PORTCTL);
	etag_etype |= TXGBE_PORTCTL_ETAG;
	wr32(hw, TXGBE_PORTCTL, etag_etype);
	txgbe_flush(hw);

	return 0;
}
static int
txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
		       struct txgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	rar_entries = hw->mac.num_rar_entries;

	/* Entry 0 is reserved for the default MAC address; scan the rest
	 * for a valid E-tag entry carrying this tunnel id.
	 */
	for (i = 1; i < rar_entries; i++) {
		wr32(hw, TXGBE_ETHADDRIDX, i);
		rar_high = rd32(hw, TXGBE_ETHADDRH);
		rar_low  = rd32(hw, TXGBE_ETHADDRL);
		if ((rar_high & TXGBE_ETHADDRH_VLD) &&
		    (rar_high & TXGBE_ETHADDRH_ETAG) &&
		    (TXGBE_ETHADDRL_ETAG(rar_low) ==
		     l2_tunnel->tunnel_id)) {
			wr32(hw, TXGBE_ETHADDRL, 0);
			wr32(hw, TXGBE_ETHADDRH, 0);

			txgbe_clear_vmdq(hw, i, BIT_MASK32);

			return ret;
		}
	}

	return ret;
}
static int
txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
		       struct txgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	/* One entry for one tunnel. Try to remove potential existing entry. */
	txgbe_e_tag_filter_del(dev, l2_tunnel);

	rar_entries = hw->mac.num_rar_entries;

	/* Take the first free entry after the default MAC address slot */
	for (i = 1; i < rar_entries; i++) {
		wr32(hw, TXGBE_ETHADDRIDX, i);
		rar_high = rd32(hw, TXGBE_ETHADDRH);
		if (rar_high & TXGBE_ETHADDRH_VLD) {
			continue;
		} else {
			txgbe_set_vmdq(hw, i, l2_tunnel->pool);
			rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
			rar_low = l2_tunnel->tunnel_id;

			wr32(hw, TXGBE_ETHADDRL, rar_low);
			wr32(hw, TXGBE_ETHADDRH, rar_high);

			return ret;
		}
	}

	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
		     " Please remove a rule before adding a new one.");
	return -EINVAL;
}
static inline struct txgbe_l2_tn_filter *
txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
			  struct txgbe_l2_tn_key *key)
{
	int ret;

	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
	if (ret < 0)
		return NULL;

	return l2_tn_info->hash_map[ret];
}
static inline int
txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
			  struct txgbe_l2_tn_filter *l2_tn_filter)
{
	int ret;

	ret = rte_hash_add_key(l2_tn_info->hash_handle,
			       &l2_tn_filter->key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert L2 tunnel filter"
			    " to hash table %d!",
			    ret);
		return ret;
	}

	l2_tn_info->hash_map[ret] = l2_tn_filter;
	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);

	return 0;
}
static inline int
txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
			  struct txgbe_l2_tn_key *key)
{
	int ret;
	struct txgbe_l2_tn_filter *l2_tn_filter;

	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "No such L2 tunnel filter to delete %d!",
			    ret);
		return ret;
	}

	l2_tn_filter = l2_tn_info->hash_map[ret];
	l2_tn_info->hash_map[ret] = NULL;
	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
	rte_free(l2_tn_filter);

	return 0;
}
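/*
 * Note on the three helpers above: rte_hash_add_key(), rte_hash_lookup()
 * and rte_hash_del_key() return a non-negative slot index on success, so
 * hash_map[] can mirror the hash table as a directly indexed array of
 * filter pointers, while the TAILQ preserves insertion order for restore.
 * A minimal sketch of the same pattern (names are illustrative):
 *
 *	int pos = rte_hash_add_key(handle, &key);
 *	if (pos >= 0)
 *		map[pos] = object;
 *	pos = rte_hash_lookup(handle, &key);
 *	if (pos >= 0)
 *		object = map[pos];
 */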
/* Add l2 tunnel filter */
static int
txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel,
			       bool restore)
{
	int ret;
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_key key;
	struct txgbe_l2_tn_filter *node;

	if (!restore) {
		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
		key.tn_id = l2_tunnel->tunnel_id;
		node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
		if (node) {
			PMD_DRV_LOG(ERR,
				    "The L2 tunnel filter already exists!");
			return -EINVAL;
		}
		node = rte_zmalloc("txgbe_l2_tn",
				   sizeof(struct txgbe_l2_tn_filter), 0);
		if (!node)
			return -ENOMEM;
		rte_memcpy(&node->key, &key,
			   sizeof(struct txgbe_l2_tn_key));
		node->pool = l2_tunnel->pool;
		ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
		if (ret < 0) {
			rte_free(node);
			return ret;
		}
	}

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	if (!restore && ret < 0)
		(void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);

	return ret;
}
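/*
 * Sketch (illustrative): adding an E-tag forwarding rule through the
 * entry point above. The tunnel id and pool values are assumptions; the
 * final FALSE marks this as a fresh add rather than a restore replay.
 *
 *	struct txgbe_l2_tunnel_conf conf;
 *
 *	conf.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
 *	conf.tunnel_id = 0x123;
 *	conf.pool = 1;
 *	ret = txgbe_dev_l2_tunnel_filter_add(dev, &conf, FALSE);
 */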
/* Delete l2 tunnel filter */
static int
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret;
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_key key;

	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
	key.tn_id = l2_tunnel->tunnel_id;
	ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
	if (ret < 0)
		return ret;

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
{
	int ret = 0;
	uint32_t ctrl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	ctrl = rd32(hw, TXGBE_POOLCTL);
	ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
	if (en)
		ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
	wr32(hw, TXGBE_POOLCTL, ctrl);

	return ret;
}
/* restore n-tuple filter */
static inline void
txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	struct txgbe_5tuple_filter *node;

	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
		txgbe_inject_5tuple_filter(dev, node);
	}
}
/* restore ethernet type filter */
static inline void
txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			wr32(hw, TXGBE_ETFLT(i),
			     filter_info->ethertype_filters[i].etqf);
			wr32(hw, TXGBE_ETCLS(i),
			     filter_info->ethertype_filters[i].etqs);
		}
	}
}
/* restore SYN filter */
static inline void
txgbe_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	uint32_t synqf;

	synqf = filter_info->syn_info;
	if (synqf & TXGBE_SYNCLS_ENA) {
		wr32(hw, TXGBE_SYNCLS, synqf);
		txgbe_flush(hw);
	}
}
/* restore L2 tunnel filter */
static inline void
txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_filter *node;
	struct txgbe_l2_tunnel_conf l2_tn_conf;

	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
		l2_tn_conf.tunnel_id = node->key.tn_id;
		l2_tn_conf.pool = node->pool;
		(void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
	}
}
/* restore rss filter */
static inline void
txgbe_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);

	if (filter_info->rss_info.conf.queue_num)
		txgbe_config_rss_filter(dev,
			&filter_info->rss_info, TRUE);
}
static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
	txgbe_ntuple_filter_restore(dev);
	txgbe_ethertype_filter_restore(dev);
	txgbe_syn_filter_restore(dev);
	txgbe_fdir_filter_restore(dev);
	txgbe_l2_tn_filter_restore(dev);
	txgbe_rss_filter_restore(dev);

	return 0;
}
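/*
 * The restore helpers above replay filter state kept in software (the
 * driver's lists, bitmaps and hash maps) back into hardware registers;
 * txgbe_filter_restore() is the single entry point for that replay and
 * is intended to run when the port is (re)started, so that filters added
 * before a device stop or reset take effect again.
 */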
static void
txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (l2_tn_info->e_tag_en)
		(void)txgbe_e_tag_enable(hw);

	if (l2_tn_info->e_tag_fwd_en)
		(void)txgbe_e_tag_forwarding_en_dis(dev, 1);

	(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure              = txgbe_dev_configure,
	.dev_infos_get              = txgbe_dev_info_get,
	.dev_start                  = txgbe_dev_start,
	.dev_stop                   = txgbe_dev_stop,
	.dev_set_link_up            = txgbe_dev_set_link_up,
	.dev_set_link_down          = txgbe_dev_set_link_down,
	.dev_close                  = txgbe_dev_close,
	.dev_reset                  = txgbe_dev_reset,
	.promiscuous_enable         = txgbe_dev_promiscuous_enable,
	.promiscuous_disable        = txgbe_dev_promiscuous_disable,
	.allmulticast_enable        = txgbe_dev_allmulticast_enable,
	.allmulticast_disable       = txgbe_dev_allmulticast_disable,
	.link_update                = txgbe_dev_link_update,
	.stats_get                  = txgbe_dev_stats_get,
	.xstats_get                 = txgbe_dev_xstats_get,
	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
	.stats_reset                = txgbe_dev_stats_reset,
	.xstats_reset               = txgbe_dev_xstats_reset,
	.xstats_get_names           = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
	.fw_version_get             = txgbe_fw_version_get,
	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
	.mtu_set                    = txgbe_dev_mtu_set,
	.vlan_filter_set            = txgbe_vlan_filter_set,
	.vlan_tpid_set              = txgbe_vlan_tpid_set,
	.vlan_offload_set           = txgbe_vlan_offload_set,
	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
	.rx_queue_start             = txgbe_dev_rx_queue_start,
	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
	.tx_queue_start             = txgbe_dev_tx_queue_start,
	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release           = txgbe_dev_rx_queue_release,
	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
	.tx_queue_release           = txgbe_dev_tx_queue_release,
	.dev_led_on                 = txgbe_dev_led_on,
	.dev_led_off                = txgbe_dev_led_off,
	.flow_ctrl_get              = txgbe_flow_ctrl_get,
	.flow_ctrl_set              = txgbe_flow_ctrl_set,
	.priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
	.mac_addr_add               = txgbe_add_rar,
	.mac_addr_remove            = txgbe_remove_rar,
	.mac_addr_set               = txgbe_set_default_mac_addr,
	.uc_hash_table_set          = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
	.reta_update                = txgbe_dev_rss_reta_update,
	.reta_query                 = txgbe_dev_rss_reta_query,
	.rss_hash_update            = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
	.filter_ctrl                = txgbe_dev_filter_ctrl,
	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
	.rxq_info_get               = txgbe_rxq_info_get,
	.txq_info_get               = txgbe_txq_info_get,
	.timesync_enable            = txgbe_timesync_enable,
	.timesync_disable           = txgbe_timesync_disable,
	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
	.get_reg                    = txgbe_get_regs,
	.get_eeprom_length          = txgbe_get_eeprom_length,
	.get_eeprom                 = txgbe_get_eeprom,
	.set_eeprom                 = txgbe_set_eeprom,
	.get_module_info            = txgbe_get_module_info,
	.get_module_eeprom          = txgbe_get_module_eeprom,
	.get_dcb_info               = txgbe_dev_get_dcb_info,
	.timesync_adjust_time       = txgbe_timesync_adjust_time,
	.timesync_read_time         = txgbe_timesync_read_time,
	.timesync_write_time        = txgbe_timesync_write_time,
	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
};
RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif
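/*
 * The dynamic log types registered above can be raised at run time from
 * the EAL command line (illustrative invocation; exact options depend on
 * the application and DPDK version):
 *
 *	dpdk-testpmd --log-level=pmd.net.txgbe.driver:debug -- -i
 *
 * The RX/TX data-path loggers additionally require the corresponding
 * RTE_LIBRTE_TXGBE_DEBUG_* options at build time.
 */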