/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <rte_common.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
#include "txgbe_regs_group.h"
static const struct reg_info txgbe_regs_general[] = {
	{TXGBE_RST, 1, 1, "TXGBE_RST"},
	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
};

static const struct reg_info txgbe_regs_nvm[] = {
};

static const struct reg_info txgbe_regs_interrupt[] = {
};

static const struct reg_info txgbe_regs_fctl_others[] = {
};

static const struct reg_info txgbe_regs_rxdma[] = {
};

static const struct reg_info txgbe_regs_rx[] = {
};

static const struct reg_info txgbe_regs_tx[] = {
};

static const struct reg_info txgbe_regs_wakeup[] = {
};

static const struct reg_info txgbe_regs_dcb[] = {
};

static const struct reg_info txgbe_regs_mac[] = {
};

static const struct reg_info txgbe_regs_diagnostic[] = {
};

static const struct reg_info *txgbe_regs_others[] = {
		txgbe_regs_fctl_others,
		txgbe_regs_diagnostic,
};
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);
static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

static int txgbe_filter_restore(struct rte_eth_dev *dev);
#define TXGBE_SET_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] |= 1 << bit;\
} while (0)

#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)

#define TXGBE_GET_HWSTRIP(h, q, r) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
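/* Illustration (not part of the driver): with 32-bit bitmap words,
 * i.e. sizeof(bitmap[0]) * NBBY == 32, queue 37 maps to idx = 37 / 32 = 1
 * and bit = 37 % 32 = 5, so it occupies bit 5 of the second word:
 *
 *	TXGBE_SET_HWSTRIP(hwstrip, 37);		// bitmap[1] |= 1 << 5
 *	TXGBE_GET_HWSTRIP(hwstrip, 37, r);	// r == 1 afterwards
 */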
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops txgbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),

	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),

	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	HW_XSTAT(flow_director_added_filters),
	HW_XSTAT(flow_director_removed_filters),
	HW_XSTAT(flow_director_filter_add_errors),
	HW_XSTAT(flow_director_filter_remove_errors),
	HW_XSTAT(flow_director_matched_filters),
	HW_XSTAT(flow_director_missed_filters),

	HW_XSTAT(rx_fcoe_crc_errors),
	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
	HW_XSTAT(rx_fcoe_dropped),
	HW_XSTAT(rx_fcoe_packets),
	HW_XSTAT(tx_fcoe_packets),
	HW_XSTAT(rx_fcoe_bytes),
	HW_XSTAT(tx_fcoe_bytes),
	HW_XSTAT(rx_fcoe_no_ddp),
	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),

	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
			   sizeof(rte_txgbe_stats_strings[0]))

/* Per-priority statistics */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
	UP_XSTAT(rx_up_packets),
	UP_XSTAT(tx_up_packets),
	UP_XSTAT(rx_up_bytes),
	UP_XSTAT(tx_up_bytes),
	UP_XSTAT(rx_up_drop_packets),

	UP_XSTAT(tx_up_xon_packets),
	UP_XSTAT(rx_up_xon_packets),
	UP_XSTAT(tx_up_xoff_packets),
	UP_XSTAT(rx_up_xoff_packets),
	UP_XSTAT(rx_up_dropped),
	UP_XSTAT(rx_up_mbuf_alloc_errors),
	UP_XSTAT(tx_up_xon2off_packets),
};

#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
			   sizeof(rte_txgbe_up_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
			   sizeof(rte_txgbe_qp_strings[0]))
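/* Sketch of the resulting xstats id layout (assuming TXGBE_MAX_UP == 8):
 * ids [0, TXGBE_NB_HW_STATS) are device-wide counters, the next
 * TXGBE_NB_UP_STATS * 8 ids are per-priority counters, and the remaining
 * TXGBE_NB_QP_STATS ids per configured queue pair are per-queue counters.
 * txgbe_get_name_by_id()/txgbe_get_offset_by_id() below decode ids in
 * exactly this order.
 */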
static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return 1;
	default:
		return 0;
	}
}
static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);

	/* A missing SFP module is tolerated at this point */
	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}
static void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
}
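/* A note on the two register families above (a reading of the code, not a
 * datasheet claim): txgbe_enable_intr() writes the Interrupt Mask Clear
 * (IMC) registers while txgbe_disable_intr() writes the Interrupt Mask Set
 * (IMS) registers with the same TXGBE_IMC_MASK pattern, which suggests they
 * are set/clear views of one interrupt mask covering every vector.
 */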
static int
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_stat_mappings *stat_mappings =
		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if (hw->mac.type != txgbe_mac_raptor)
		return -ENOSYS;

	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
		return -EIO;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= TXGBE_NB_STAT_MAPPING) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsm[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsm[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
	return 0;
}
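/* Worked example (illustrative only, assuming 8-bit QSM fields so that
 * NB_QMAP_FIELDS_PER_QSM_REG == 4 and QSM_REG_NB_BITS_PER_QMAP_FIELD == 8):
 * mapping queue_id 5 to stat_idx 2 gives n = 5 / 4 = 1 and
 * offset = 5 % 4 = 1, so bits 15:8 of rqsm[1]/tqsm[1] are first cleared
 * and then set to 2.
 */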
static void
txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
{
	int i;
	u8 bwgp;
	struct txgbe_dcb_tc_config *tc;

	UNREFERENCED_PARAMETER(hw);

	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->pfc = txgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
	}
	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities */
	dcb_config->support.capabilities = 0xFF;
}
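/* Sanity check on the bandwidth split above (assuming TXGBE_DCB_TC_MAX == 8):
 * bwgp = 100 / 8 = 12, and `bwgp + (i & 1)` alternates 12/13 across the
 * eight TCs, i.e. 4 * 12 + 4 * 13 = 100 percent of the bandwidth.
 */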
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
{
	u32 mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = TXGBE_MNGSEM_SWPHY |
	       TXGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	uint16_t csum;
	int err, i, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not have been initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
		16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

	/* Initialize DCB configuration*/
	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
	txgbe_dcb_init(hw, dcb_config);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = txgbe_fc_full;
	hw->fc.current_mode = txgbe_fc_full;
	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
	}

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the BIOS. A delay of 200ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		rte_delay_ms(400);
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
		return -EIO;
	} else if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	txgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
		RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap*/
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = txgbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct txgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize l2 tunnel filter list & hash */
	txgbe_l2_tn_filter_init(eth_dev);

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));

	return 0;
}
static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}
static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	struct txgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple, entries);
		rte_free(p_5tuple);
	}
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
	struct txgbe_l2_tn_filter *l2_tn_filter;

	if (l2_tn_info->hash_map)
		rte_free(l2_tn_info->hash_map);
	if (l2_tn_info->hash_handle)
		rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
			     l2_tn_filter, entries);
		rte_free(l2_tn_filter);
	}

	return 0;
}
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters l2_tn_hash_params = {
		.name = l2_tn_hash_name,
		.entries = TXGBE_MAX_L2_TN_FILTER_NUM,
		.key_len = sizeof(struct txgbe_l2_tn_key),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&l2_tn_info->l2_tn_list);
	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
		 "l2_tn_%s", TDEV_NAME(eth_dev));
	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
	if (!l2_tn_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
		return -EINVAL;
	}
	l2_tn_info->hash_map = rte_zmalloc("txgbe",
					   sizeof(struct txgbe_l2_tn_filter *) *
					   TXGBE_MAX_L2_TN_FILTER_NUM,
					   0);
	if (!l2_tn_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for L2 TN hash map!");
		return -ENOMEM;
	}
	l2_tn_info->e_tag_en = FALSE;
	l2_tn_info->e_tag_fwd_en = FALSE;
	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;

	return 0;
}
static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					       &eth_da);
		if (retval)
			return retval;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
				    sizeof(struct txgbe_adapter),
				    eth_dev_pci_specific_init, pci_dev,
				    eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	return 0;
}
static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};
static int
txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
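/* Worked example (illustrative only): vlan_id = 100 gives
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * i.e. bit 4 of the fourth 32-bit VFTA word; 128 words * 32 bits cover
 * all 4096 possible VLAN ids.
 */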
static void
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		txgbe_vlan_hw_strip_enable(dev, queue);
	else
		txgbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			  !(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg |= TXGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			  (rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg &= ~TXGBE_RXCFG_VLAN;
	}
	rxcfg &= ~TXGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		txgbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
		txgbe_dev_rx_queue_start(dev, queue);
	}
}
static int
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, TXGBE_PORTCTL);

	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR, "Inner type is not supported");
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_LSB_MASK,
				TXGBE_TAGTPID_LSB(tpid));
		}
		break;
	case ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, TXGBE_EXTAG,
				TXGBE_EXTAG_VLAN_MASK,
				TXGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_MSB_MASK,
				TXGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}
static void
txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);
}

static void
txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
	vlnctrl |= TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}
static void
txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
	struct txgbe_rx_queue *rxq;

	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		TXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = PKT_RX_VLAN;
		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}
}
static void
txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl &= ~TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record the setting of HW strip for this queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl |= TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record the setting of HW strip for this queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
static void
txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
	ctrl &= ~TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}

static void
txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl |= TXGBE_PORTCTL_VLANEXT;
	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
	    txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ctrl |= TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}
static void
txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct txgbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			txgbe_vlan_strip_queue_set(dev, i, 1);
		else
			txgbe_vlan_strip_queue_set(dev, i, 0);
	}
}
static void
txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct txgbe_rx_queue *rxq;

	if (mask & ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}
static int
txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK)
		txgbe_vlan_hw_strip_config(dev);

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			txgbe_vlan_hw_filter_enable(dev);
		else
			txgbe_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			txgbe_vlan_hw_extend_enable(dev);
		else
			txgbe_vlan_hw_extend_disable(dev);
	}

	return 0;
}

static int
txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	txgbe_vlan_offload_config(dev, mask);

	return 0;
}
static void
txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);

	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, TXGBE_VLANCTL, vlanctrl);
}
static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	return 0;
}
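/* Worked example (illustrative only, assuming TXGBE_MAX_RX_QUEUE_NUM == 128):
 * with nb_rx_q == 4 the device is split into 32 pools, so
 * nb_q_per_pool = 128 / 32 = 4, and with e.g. max_vfs == 8 the PF's default
 * pool queues start at index 8 * 4 = 32.
 */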
static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_RSS:
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" values are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if no mq mode is configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					" not supported.");
			return -EINVAL;
		}
		/* check configuration for vmdq+dcb mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}
	}
	return 0;
}
static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any of the Rx queues doesn't meet the
	 * bulk-allocation preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}
static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	uint32_t gpie;

	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}
int
txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
{
	struct txgbe_hw *hw;
	struct txgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint32_t queue_end;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
	if (ret < 0)
		return ret;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (tx_rate > link.link_speed)
		return -EINVAL;

	if (q_msk == 0)
		return 0;

	hw = TXGBE_DEV_HW(dev);
	vfinfo = *(TXGBE_DEV_VFDATA(dev));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	if (queue_end >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (vfinfo) {
		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
			if (vf_idx == vf)
				continue;
			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
				idx++)
				total_rate += vfinfo[vf_idx].tx_rate[idx];
		}
	} else {
		return -EINVAL;
	}

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;
		}
	}

	if (total_rate > dev->data->dev_link.link_speed) {
		/* Reset stored TX rate of the VF if it causes the
		 * aggregate to exceed the link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
		return -EINVAL;
	}

	/* Set ARBTXRATE of each queue/pool for vf X */
	for (; queue_idx <= queue_end; queue_idx++) {
		if (0x1 & q_msk)
			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
		q_msk >>= 1;
	}

	return 0;
}
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = 0;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* TXGBE devices don't support:
	 *    - half duplex (checked afterwards for valid speeds)
	 *    - fixed speed: TODO implement
	 */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
			"Invalid link_speeds for port %u, fixed speed not supported",
			dev->data->port_id);
		return -EINVAL;
	}

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter
	 * this calls reset and start
	 */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = txgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	txgbe_pf_host_configure(dev);

	txgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for sleep until rx interrupt */
	txgbe_configure_msix(dev);

	/* initialize transmission unit */
	txgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	err = txgbe_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		txgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	txgbe_configure_pb(dev);
	txgbe_configure_port(dev);
	txgbe_configure_dcb(dev);

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					txgbe_set_vf_rate_limit(dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	err = txgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			 ETH_LINK_SPEED_10G;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = (TXGBE_LINK_SPEED_100M_FULL |
			 TXGBE_LINK_SPEED_1GB_FULL |
			 TXGBE_LINK_SPEED_10GB_FULL);
	} else {
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= TXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= TXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= TXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= TXGBE_LINK_SPEED_100M_FULL;
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
		txgbe_dev_macsec_interrupt_setup(dev);
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     txgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		txgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	txgbe_enable_intr(dev);
	txgbe_filter_restore(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	txgbe_dev_link_update(dev, 0);

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	txgbe_dev_clear_queues(dev);
	return -EIO;
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
txgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	txgbe_disable_intr(hw);

	/* reset the NIC */
	txgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
	}

	txgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   txgbe_dev_interrupt_handler,
					   dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->rss_reta_updated = 0;
	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}
/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}
/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	txgbe_pf_reset_hw(hw);

	ret = txgbe_dev_stop(dev);

	txgbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				txgbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				     "intr callback unregister failed: %d",
				     ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));

	/* cancel the delay handler before remove dev */
	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

	/* uninitialize PF if max_vfs not zero */
	txgbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	/* remove all the L2 tunnel filters & hash */
	txgbe_l2_tn_filter_uninit(dev);

	/* Remove all ntuple filters of the device */
	txgbe_ntuple_filter_uninit(dev);

	return ret;
}
static int
txgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset PF port, it should notify all
	 * its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to txgbe PF, it is rather complex.
	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_txgbe_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_txgbe_dev_init(dev, NULL);

	return ret;
}
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
{                                                               \
	uint64_t current_counter = rd32(hw, reg);               \
	if (current_counter < last_counter)                     \
		current_counter += 0x100000000LL;               \
	if (!hw->offset_loaded)                                 \
		last_counter = current_counter;                 \
	counter = current_counter - last_counter;               \
	counter &= 0xFFFFFFFFLL;                                \
}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{                                                                \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
	uint64_t current_counter_msb = rd32(hw, reg_msb);        \
	uint64_t current_counter = (current_counter_msb << 32) | \
		current_counter_lsb;                             \
	if (current_counter < last_counter)                      \
		current_counter += 0x1000000000LL;               \
	if (!hw->offset_loaded)                                  \
		last_counter = current_counter;                  \
	counter = current_counter - last_counter;                \
	counter &= 0xFFFFFFFFFLL;                                \
}
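/* How the wrap handling above works (illustrative): the per-queue hardware
 * counters are free-running. If the previous snapshot (last_counter) was
 * 0xFFFFFFF0 and the register now reads 0x00000010, current_counter is
 * smaller than last_counter, so adding 2^32 (or 2^36 for the split 36-bit
 * byte counters) before subtracting yields the true delta of 0x20.
 */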
void
txgbe_read_stats_registers(struct txgbe_hw *hw,
			   struct txgbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
	}

	/* PB Stats */
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->up[i].rx_up_xon_packets +=
				rd32(hw, TXGBE_PBRXUPXON(i));
		hw_stats->up[i].rx_up_xoff_packets +=
				rd32(hw, TXGBE_PBRXUPXOFF(i));
		hw_stats->up[i].tx_up_xon_packets +=
				rd32(hw, TXGBE_PBTXUPXON(i));
		hw_stats->up[i].tx_up_xoff_packets +=
				rd32(hw, TXGBE_PBTXUPXOFF(i));
		hw_stats->up[i].tx_up_xon2off_packets +=
				rd32(hw, TXGBE_PBTXUPOFF(i));
		hw_stats->up[i].rx_up_dropped +=
				rd32(hw, TXGBE_PBRXMISS(i));
	}
	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);

	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);

	/* FCoE Stats */
	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);

	/* Flow Director Stats */
	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
	hw_stats->flow_director_added_filters +=
			TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_removed_filters +=
			TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_filter_add_errors +=
			TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
	hw_stats->flow_director_filter_remove_errors +=
			TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, TXGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, TXGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, TXGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, TXGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, TXGBE_LSECRX_BADPKT(i));
	}
	hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, TXGBE_LSECRX_INVSAPKT);
	hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, TXGBE_LSECRX_BADSAPKT);

	hw_stats->rx_total_missed_packets = 0;
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->rx_total_missed_packets +=
			hw_stats->up[i].rx_up_dropped;
	}
}
static int
txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_stat_mappings *stat_mappings =
			TXGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	txgbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < TXGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}
2128 stats->imissed = hw_stats->rx_total_missed_packets;
2129 stats->ierrors = hw_stats->rx_crc_errors +
2130 hw_stats->rx_mac_short_packet_dropped +
2131 hw_stats->rx_length_errors +
2132 hw_stats->rx_undersize_errors +
2133 hw_stats->rx_oversize_errors +
2134 hw_stats->rx_drop_packets +
2135 hw_stats->rx_illegal_byte_errors +
2136 hw_stats->rx_error_bytes +
2137 hw_stats->rx_fragment_errors +
2138 hw_stats->rx_fcoe_crc_errors +
2139 hw_stats->rx_fcoe_mbuf_allocation_errors;
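/*
 * A minimal sketch of the queue-to-counter mapping used in the loop above,
 * assuming NB_QMAP_FIELDS_PER_QSM_REG == 4 with an 8-bit field per queue
 * (both taken from the offset arithmetic above); the helper name is
 * hypothetical and for illustration only.
 */
#if 0	/* illustrative sketch, never compiled */
static inline unsigned int
example_stat_counter_slot(uint32_t qsm_word, unsigned int queue)
{
	unsigned int offset = (queue % 4) * 8;	/* 4 fields per register */
	uint32_t q_map = (qsm_word >> offset) & QMAP_FIELD_RESERVED_BITS_MASK;

	/* fold into the RTE_ETHDEV_QUEUE_STAT_CNTRS counter slots */
	return q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS;
}
#endif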
2147 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2149 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2150 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2152 /* HW registers are cleared on read */
2153 hw->offset_loaded = 0;
2154 txgbe_dev_stats_get(dev, NULL);
2155 hw->offset_loaded = 1;
2157 /* Reset software totals */
2158 memset(hw_stats, 0, sizeof(*hw_stats));
2163 /* This function calculates the number of xstats based on the current config */
2165 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2167 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2168 return TXGBE_NB_HW_STATS +
2169 TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2170 TXGBE_NB_QP_STATS * nb_queues;
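/*
 * Worked example (queue counts are assumptions for illustration): assuming
 * TXGBE_MAX_UP == 8 user priorities, a port configured with 4 Rx and 2 Tx
 * queues reports
 *   TXGBE_NB_HW_STATS + TXGBE_NB_UP_STATS * 8 + TXGBE_NB_QP_STATS * 4
 * entries, since max(nb_rx_queues, nb_tx_queues) == 4.
 */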
2174 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2178 /* Extended stats from txgbe_hw_stats */
2179 if (id < TXGBE_NB_HW_STATS) {
2180 snprintf(name, size, "[hw]%s",
2181 rte_txgbe_stats_strings[id].name);
2184 id -= TXGBE_NB_HW_STATS;
2186 /* Priority Stats */
2187 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2188 nb = id / TXGBE_NB_UP_STATS;
2189 st = id % TXGBE_NB_UP_STATS;
2190 snprintf(name, size, "[p%u]%s", nb,
2191 rte_txgbe_up_strings[st].name);
2194 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2197 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2198 nb = id / TXGBE_NB_QP_STATS;
2199 st = id % TXGBE_NB_QP_STATS;
2200 snprintf(name, size, "[q%u]%s", nb,
2201 rte_txgbe_qp_strings[st].name);
2204 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2206 return -(int)(id + 1);
2210 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2214 /* Extended stats from txgbe_hw_stats */
2215 if (id < TXGBE_NB_HW_STATS) {
2216 *offset = rte_txgbe_stats_strings[id].offset;
2219 id -= TXGBE_NB_HW_STATS;
2221 /* Priority Stats */
2222 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2223 nb = id / TXGBE_NB_UP_STATS;
2224 st = id % TXGBE_NB_UP_STATS;
2225 *offset = rte_txgbe_up_strings[st].offset +
2226 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2229 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2232 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2233 nb = id / TXGBE_NB_QP_STATS;
2234 st = id % TXGBE_NB_QP_STATS;
2235 *offset = rte_txgbe_qp_strings[st].offset +
2236 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
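/*
 * Sketch of the xstats id space consumed by the two lookups above: ids
 * cover the hardware stats first, then the per-priority stats, then the
 * per-queue stats. Assuming (for illustration only) TXGBE_NB_HW_STATS ==
 * 100 and TXGBE_NB_UP_STATS == 4, id 106 decodes to priority
 * (106 - 100) / 4 == 1 and stat (106 - 100) % 4 == 2, i.e. the "[p1]"
 * name with index 2 and the matching byte offset into txgbe_hw_stats.
 */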
2243 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2244 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2246 unsigned int i, count;
2248 count = txgbe_xstats_calc_num(dev);
2249 if (xstats_names == NULL)
2252 /* Note: limit >= cnt_stats checked upstream
2253 * in rte_eth_xstats_names()
2255 limit = min(limit, count);
2257 /* Extended stats from txgbe_hw_stats */
2258 for (i = 0; i < limit; i++) {
2259 if (txgbe_get_name_by_id(i, xstats_names[i].name,
2260 sizeof(xstats_names[i].name))) {
2261 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2269 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2270 struct rte_eth_xstat_name *xstats_names,
2271 const uint64_t *ids,
2277 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2279 for (i = 0; i < limit; i++) {
2280 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2281 sizeof(xstats_names[i].name))) {
2282 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2291 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2294 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2295 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2296 unsigned int i, count;
2298 txgbe_read_stats_registers(hw, hw_stats);
2300 /* If this is a reset, xstats is NULL and we have already cleared the
2301 * registers by reading them.
2303 count = txgbe_xstats_calc_num(dev);
2307 limit = min(limit, txgbe_xstats_calc_num(dev));
2309 /* Extended stats from txgbe_hw_stats */
2310 for (i = 0; i < limit; i++) {
2311 uint32_t offset = 0;
2313 if (txgbe_get_offset_by_id(i, &offset)) {
2314 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2317 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2325 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2328 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2329 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2330 unsigned int i, count;
2332 txgbe_read_stats_registers(hw, hw_stats);
2334 /* If this is a reset, xstats is NULL and we have already cleared the
2335 * registers by reading them.
2337 count = txgbe_xstats_calc_num(dev);
2341 limit = min(limit, txgbe_xstats_calc_num(dev));
2343 /* Extended stats from txgbe_hw_stats */
2344 for (i = 0; i < limit; i++) {
2347 if (txgbe_get_offset_by_id(i, &offset)) {
2348 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2351 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2358 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2359 uint64_t *values, unsigned int limit)
2361 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2365 return txgbe_dev_xstats_get_(dev, values, limit);
2367 for (i = 0; i < limit; i++) {
2370 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2371 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2374 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2381 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2383 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2384 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2386 /* HW registers are cleared on read */
2387 hw->offset_loaded = 0;
2388 txgbe_read_stats_registers(hw, hw_stats);
2389 hw->offset_loaded = 1;
2391 /* Reset software totals */
2392 memset(hw_stats, 0, sizeof(*hw_stats));
2398 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2400 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2401 u16 eeprom_verh, eeprom_verl;
2405 hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
2406 hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
2408 etrack_id = (eeprom_verh << 16) | eeprom_verl;
2409 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2411 ret += 1; /* add the size of '\0' */
2412 if (fw_size < (u32)ret)
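/*
 * Example of the string built above (the EEPROM words are made up): with
 * eeprom_verh == 0x0001 and eeprom_verl == 0x0203, etrack_id is
 * 0x00010203 and the reported version is "0x00010203", which needs
 * 11 bytes including the trailing '\0' -- hence the ret + 1 check.
 */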
2419 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2421 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2422 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2424 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2425 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2426 dev_info->min_rx_bufsize = 1024;
2427 dev_info->max_rx_pktlen = 15872;
2428 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2429 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2430 dev_info->max_vfs = pci_dev->max_vfs;
2431 dev_info->max_vmdq_pools = ETH_64_POOLS;
2432 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2433 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2434 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2435 dev_info->rx_queue_offload_capa);
2436 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2437 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2439 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2441 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2442 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2443 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2445 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2450 dev_info->default_txconf = (struct rte_eth_txconf) {
2452 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2453 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2454 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2456 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2460 dev_info->rx_desc_lim = rx_desc_lim;
2461 dev_info->tx_desc_lim = tx_desc_lim;
2463 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2464 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2465 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2467 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2468 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2470 /* Driver-preferred Rx/Tx parameters */
2471 dev_info->default_rxportconf.burst_size = 32;
2472 dev_info->default_txportconf.burst_size = 32;
2473 dev_info->default_rxportconf.nb_queues = 1;
2474 dev_info->default_txportconf.nb_queues = 1;
2475 dev_info->default_rxportconf.ring_size = 256;
2476 dev_info->default_txportconf.ring_size = 256;
2482 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2484 if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2485 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2486 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2487 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2488 return txgbe_get_supported_ptypes();
2494 txgbe_dev_setup_link_alarm_handler(void *param)
2496 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2497 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2498 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2500 bool autoneg = false;
2502 speed = hw->phy.autoneg_advertised;
2504 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2506 hw->mac.setup_link(hw, speed, true);
2508 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2511 /* return 0 means link status changed, -1 means not changed */
2513 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2514 int wait_to_complete)
2516 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2517 struct rte_eth_link link;
2518 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2519 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2524 memset(&link, 0, sizeof(link));
2525 link.link_status = ETH_LINK_DOWN;
2526 link.link_speed = ETH_SPEED_NUM_NONE;
2527 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2528 link.link_autoneg = ETH_LINK_AUTONEG;
2530 hw->mac.get_link_status = true;
2532 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2533 return rte_eth_linkstatus_set(dev, &link);
2535 /* check whether it needs to wait for completion, if the lsc interrupt is enabled */
2536 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2539 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2542 link.link_speed = ETH_SPEED_NUM_100M;
2543 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2544 return rte_eth_linkstatus_set(dev, &link);
2548 if (hw->phy.media_type == txgbe_media_type_fiber) {
2549 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2550 rte_eal_alarm_set(10,
2551 txgbe_dev_setup_link_alarm_handler, dev);
2553 return rte_eth_linkstatus_set(dev, &link);
2556 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2557 link.link_status = ETH_LINK_UP;
2558 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2560 switch (link_speed) {
2562 case TXGBE_LINK_SPEED_UNKNOWN:
2563 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2564 link.link_speed = ETH_SPEED_NUM_100M;
2567 case TXGBE_LINK_SPEED_100M_FULL:
2568 link.link_speed = ETH_SPEED_NUM_100M;
2571 case TXGBE_LINK_SPEED_1GB_FULL:
2572 link.link_speed = ETH_SPEED_NUM_1G;
2575 case TXGBE_LINK_SPEED_2_5GB_FULL:
2576 link.link_speed = ETH_SPEED_NUM_2_5G;
2579 case TXGBE_LINK_SPEED_5GB_FULL:
2580 link.link_speed = ETH_SPEED_NUM_5G;
2583 case TXGBE_LINK_SPEED_10GB_FULL:
2584 link.link_speed = ETH_SPEED_NUM_10G;
2588 return rte_eth_linkstatus_set(dev, &link);
2592 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2594 return txgbe_dev_link_update_share(dev, wait_to_complete);
2598 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2600 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2603 fctrl = rd32(hw, TXGBE_PSRCTL);
2604 fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2605 wr32(hw, TXGBE_PSRCTL, fctrl);
2611 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2613 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2616 fctrl = rd32(hw, TXGBE_PSRCTL);
2617 fctrl &= (~TXGBE_PSRCTL_UCP);
2618 if (dev->data->all_multicast == 1)
2619 fctrl |= TXGBE_PSRCTL_MCP;
2621 fctrl &= (~TXGBE_PSRCTL_MCP);
2622 wr32(hw, TXGBE_PSRCTL, fctrl);
2628 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2630 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2633 fctrl = rd32(hw, TXGBE_PSRCTL);
2634 fctrl |= TXGBE_PSRCTL_MCP;
2635 wr32(hw, TXGBE_PSRCTL, fctrl);
2641 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2643 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2646 if (dev->data->promiscuous == 1)
2647 return 0; /* must remain in all_multicast mode */
2649 fctrl = rd32(hw, TXGBE_PSRCTL);
2650 fctrl &= (~TXGBE_PSRCTL_MCP);
2651 wr32(hw, TXGBE_PSRCTL, fctrl);
2657 * It clears the interrupt causes and enables the interrupt.
2658 * It will be called only once during NIC initialization.
2661 * Pointer to struct rte_eth_dev.
2663 * Enable or Disable.
2666 * - On success, zero.
2667 * - On failure, a negative value.
2670 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2672 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2674 txgbe_dev_link_status_print(dev);
2676 intr->mask_misc |= TXGBE_ICRMISC_LSC;
2678 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2684 * It clears the interrupt causes and enables the interrupt.
2685 * It will be called only once during NIC initialization.
2688 * Pointer to struct rte_eth_dev.
2691 * - On success, zero.
2692 * - On failure, a negative value.
2695 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2697 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2699 intr->mask[0] |= TXGBE_ICR_MASK;
2700 intr->mask[1] |= TXGBE_ICR_MASK;
2706 * It clears the interrupt causes and enables the interrupt.
2707 * It will be called only once during NIC initialization.
2710 * Pointer to struct rte_eth_dev.
2713 * - On success, zero.
2714 * - On failure, a negative value.
2717 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2719 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2721 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2727 * It reads the ICR and sets the flag (TXGBE_ICRMISC_LSC) for link_update.
2730 * Pointer to struct rte_eth_dev.
2733 * - On success, zero.
2734 * - On failure, a negative value.
2737 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2740 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2741 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2743 /* clear all cause mask */
2744 txgbe_disable_intr(hw);
2746 /* read-on-clear nic registers here */
2747 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2748 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2752 /* set flag for async link update */
2753 if (eicr & TXGBE_ICRMISC_LSC)
2754 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2756 if (eicr & TXGBE_ICRMISC_VFMBX)
2757 intr->flags |= TXGBE_FLAG_MAILBOX;
2759 if (eicr & TXGBE_ICRMISC_LNKSEC)
2760 intr->flags |= TXGBE_FLAG_MACSEC;
2762 if (eicr & TXGBE_ICRMISC_GPIO)
2763 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2769 * It gets and then prints the link status.
2772 * Pointer to struct rte_eth_dev.
2775 * - On success, zero.
2776 * - On failure, a negative value.
2779 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2781 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2782 struct rte_eth_link link;
2784 rte_eth_linkstatus_get(dev, &link);
2786 if (link.link_status) {
2787 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2788 (int)(dev->data->port_id),
2789 (unsigned int)link.link_speed,
2790 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2791 "full-duplex" : "half-duplex");
2793 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2794 (int)(dev->data->port_id));
2796 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2797 pci_dev->addr.domain,
2799 pci_dev->addr.devid,
2800 pci_dev->addr.function);
2804 * It executes link_update after an interrupt has occurred.
2807 * Pointer to struct rte_eth_dev.
2810 * - On success, zero.
2811 * - On failure, a negative value.
2814 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2815 struct rte_intr_handle *intr_handle)
2817 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2819 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2821 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2823 if (intr->flags & TXGBE_FLAG_MAILBOX) {
2824 txgbe_pf_mbx_process(dev);
2825 intr->flags &= ~TXGBE_FLAG_MAILBOX;
2828 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2829 hw->phy.handle_lasi(hw);
2830 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2833 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2834 struct rte_eth_link link;
2836 /* get the link status before the link update, for later comparison */
2837 rte_eth_linkstatus_get(dev, &link);
2839 txgbe_dev_link_update(dev, 0);
2842 if (!link.link_status)
2843 /* handle it 1 sec later, waiting for it to become stable */
2844 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2845 /* the link is likely to go down */
2847 /* handle it 4 sec later, waiting for it to become stable */
2848 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2850 txgbe_dev_link_status_print(dev);
2851 if (rte_eal_alarm_set(timeout * 1000,
2852 txgbe_dev_interrupt_delayed_handler,
2854 PMD_DRV_LOG(ERR, "Error setting alarm");
2856 /* remember original mask */
2857 intr->mask_misc_orig = intr->mask_misc;
2858 /* only disable lsc interrupt */
2859 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2863 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2864 txgbe_enable_intr(dev);
2865 rte_intr_enable(intr_handle);
2871 * Interrupt handler which shall be registered as an alarm callback for the
2872 * delayed handling of a specific interrupt, waiting for a stable NIC state.
2873 * As the txgbe interrupt state is not stable right after the link goes
2874 * down, it needs to wait 4 seconds to read a stable status.
2877 * Pointer to interrupt handle.
2879 * The address of parameter (struct rte_eth_dev *) registered before.
2885 txgbe_dev_interrupt_delayed_handler(void *param)
2887 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2888 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2889 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2890 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2891 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2894 txgbe_disable_intr(hw);
2896 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2897 if (eicr & TXGBE_ICRMISC_VFMBX)
2898 txgbe_pf_mbx_process(dev);
2900 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2901 hw->phy.handle_lasi(hw);
2902 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2905 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2906 txgbe_dev_link_update(dev, 0);
2907 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
2908 txgbe_dev_link_status_print(dev);
2909 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2913 if (intr->flags & TXGBE_FLAG_MACSEC) {
2914 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2916 intr->flags &= ~TXGBE_FLAG_MACSEC;
2919 /* restore original mask */
2920 intr->mask_misc = intr->mask_misc_orig;
2921 intr->mask_misc_orig = 0;
2923 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2924 txgbe_enable_intr(dev);
2925 rte_intr_enable(intr_handle);
2929 * Interrupt handler triggered by NIC for handling
2930 * specific interrupt.
2933 * Pointer to interrupt handle.
2935 * The address of parameter (struct rte_eth_dev *) registered before.
2941 txgbe_dev_interrupt_handler(void *param)
2943 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2945 txgbe_dev_interrupt_get_status(dev);
2946 txgbe_dev_interrupt_action(dev, dev->intr_handle);
2950 txgbe_dev_led_on(struct rte_eth_dev *dev)
2952 struct txgbe_hw *hw;
2954 hw = TXGBE_DEV_HW(dev);
2955 return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
2959 txgbe_dev_led_off(struct rte_eth_dev *dev)
2961 struct txgbe_hw *hw;
2963 hw = TXGBE_DEV_HW(dev);
2964 return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
2968 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2970 struct txgbe_hw *hw;
2976 hw = TXGBE_DEV_HW(dev);
2978 fc_conf->pause_time = hw->fc.pause_time;
2979 fc_conf->high_water = hw->fc.high_water[0];
2980 fc_conf->low_water = hw->fc.low_water[0];
2981 fc_conf->send_xon = hw->fc.send_xon;
2982 fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2985 * Return rx_pause status according to the actual setting of the RXFCCFG register.
2988 mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
2989 if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
2995 * Return tx_pause status according to the actual setting of the TXFCCFG register.
2998 fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
2999 if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
3004 if (rx_pause && tx_pause)
3005 fc_conf->mode = RTE_FC_FULL;
3007 fc_conf->mode = RTE_FC_RX_PAUSE;
3009 fc_conf->mode = RTE_FC_TX_PAUSE;
3011 fc_conf->mode = RTE_FC_NONE;
3017 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3019 struct txgbe_hw *hw;
3021 uint32_t rx_buf_size;
3022 uint32_t max_high_water;
3023 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3030 PMD_INIT_FUNC_TRACE();
3032 hw = TXGBE_DEV_HW(dev);
3033 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
3034 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3037 * Reserve at least one Ethernet frame for the watermark;
3038 * high_water/low_water are in kilobytes for txgbe
3040 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3041 if (fc_conf->high_water > max_high_water ||
3042 fc_conf->high_water < fc_conf->low_water) {
3043 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3044 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3048 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
3049 hw->fc.pause_time = fc_conf->pause_time;
3050 hw->fc.high_water[0] = fc_conf->high_water;
3051 hw->fc.low_water[0] = fc_conf->low_water;
3052 hw->fc.send_xon = fc_conf->send_xon;
3053 hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3055 err = txgbe_fc_enable(hw);
3057 /* Not negotiated is not an error case */
3058 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
3059 wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
3060 (fc_conf->mac_ctrl_frame_fwd
3061 ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
3067 PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
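/*
 * Watermark example for the bounds check above (the buffer size is an
 * assumption for illustration): with a 32 KB Rx packet buffer and
 * RTE_ETHER_MAX_LEN == 1518, max_high_water = (32768 - 1518) >> 10 == 30,
 * so any high_water/low_water pair must satisfy
 * low_water <= high_water <= 30 (values in KB).
 */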
3072 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3073 struct rte_eth_pfc_conf *pfc_conf)
3076 uint32_t rx_buf_size;
3077 uint32_t max_high_water;
3079 uint8_t map[TXGBE_DCB_UP_MAX] = { 0 };
3080 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3081 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3083 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3090 PMD_INIT_FUNC_TRACE();
3092 txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3093 tc_num = map[pfc_conf->priority];
3094 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3095 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3097 * Reserve at least one Ethernet frame for the watermark;
3098 * high_water/low_water are in kilobytes for txgbe
3100 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3101 if (pfc_conf->fc.high_water > max_high_water ||
3102 pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3103 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3104 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3108 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3109 hw->fc.pause_time = pfc_conf->fc.pause_time;
3110 hw->fc.send_xon = pfc_conf->fc.send_xon;
3111 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
3112 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3114 err = txgbe_dcb_pfc_enable(hw, tc_num);
3116 /* Not negotiated is not an error case */
3117 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3120 PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3125 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3126 struct rte_eth_rss_reta_entry64 *reta_conf,
3131 uint16_t idx, shift;
3132 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3133 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3135 PMD_INIT_FUNC_TRACE();
3137 if (!txgbe_rss_update_sp(hw->mac.type)) {
3138 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3143 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3144 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3145 "(%d) doesn't match the number hardware can supported "
3146 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3150 for (i = 0; i < reta_size; i += 4) {
3151 idx = i / RTE_RETA_GROUP_SIZE;
3152 shift = i % RTE_RETA_GROUP_SIZE;
3153 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3157 reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3158 for (j = 0; j < 4; j++) {
3159 if (RS8(mask, j, 0x1)) {
3160 reta &= ~(MS32(8 * j, 0xFF));
3161 reta |= LS32(reta_conf[idx].reta[shift + j],
3165 wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3167 adapter->rss_reta_updated = 1;
3173 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3174 struct rte_eth_rss_reta_entry64 *reta_conf,
3177 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3180 uint16_t idx, shift;
3182 PMD_INIT_FUNC_TRACE();
3184 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3185 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3186 "(%d) doesn't match the number hardware can supported "
3187 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3191 for (i = 0; i < reta_size; i += 4) {
3192 idx = i / RTE_RETA_GROUP_SIZE;
3193 shift = i % RTE_RETA_GROUP_SIZE;
3194 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3198 reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3199 for (j = 0; j < 4; j++) {
3200 if (RS8(mask, j, 0x1))
3201 reta_conf[idx].reta[shift + j] =
3202 (uint16_t)RS32(reta, 8 * j, 0xFF);
3210 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3211 uint32_t index, uint32_t pool)
3213 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3214 uint32_t enable_addr = 1;
3216 return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3221 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3223 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3225 txgbe_clear_rar(hw, index);
3229 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3231 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3233 txgbe_remove_rar(dev, 0);
3234 txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3240 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3242 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3243 struct rte_eth_dev_info dev_info;
3244 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3245 struct rte_eth_dev_data *dev_data = dev->data;
3248 ret = txgbe_dev_info_get(dev, &dev_info);
3252 /* check that mtu is within the allowed range */
3253 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
3256 /* If device is started, refuse mtu that requires the support of
3257 * scattered packets when this feature has not been enabled before.
3259 if (dev_data->dev_started && !dev_data->scattered_rx &&
3260 (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3261 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3262 PMD_INIT_LOG(ERR, "Stop port first.");
3266 /* update max frame size */
3267 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3270 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3271 TXGBE_FRAME_SIZE_MAX);
3273 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3274 TXGBE_FRMSZ_MAX(frame_size));
3280 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3282 uint32_t vector = 0;
3284 switch (hw->mac.mc_filter_type) {
3285 case 0: /* use bits [47:36] of the address */
3286 vector = ((uc_addr->addr_bytes[4] >> 4) |
3287 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3289 case 1: /* use bits [46:35] of the address */
3290 vector = ((uc_addr->addr_bytes[4] >> 3) |
3291 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3293 case 2: /* use bits [45:34] of the address */
3294 vector = ((uc_addr->addr_bytes[4] >> 2) |
3295 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3297 case 3: /* use bits [43:32] of the address */
3298 vector = ((uc_addr->addr_bytes[4]) |
3299 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3301 default: /* Invalid mc_filter_type */
3305 /* the vector can only be 12 bits wide, or the boundary will be exceeded */
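/*
 * Worked example for mc_filter_type 0, i.e. bits [47:36] of the address
 * (the address is made up for illustration): for 00:11:22:33:44:55,
 * addr_bytes[4] == 0x44 and addr_bytes[5] == 0x55, so
 * vector = (0x44 >> 4) | (0x55 << 4) = 0x004 | 0x550 = 0x554,
 * which fits in the 12-bit table index.
 */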
3311 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3312 struct rte_ether_addr *mac_addr, uint8_t on)
3320 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3321 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3323 /* The UTA table only exists on PF hardware */
3324 if (hw->mac.type < txgbe_mac_raptor)
3327 vector = txgbe_uta_vector(hw, mac_addr);
3328 uta_idx = (vector >> 5) & 0x7F;
3329 uta_mask = 0x1UL << (vector & 0x1F);
3331 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3334 reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3336 uta_info->uta_in_use++;
3337 reg_val |= uta_mask;
3338 uta_info->uta_shadow[uta_idx] |= uta_mask;
3340 uta_info->uta_in_use--;
3341 reg_val &= ~uta_mask;
3342 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3345 wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3347 psrctl = rd32(hw, TXGBE_PSRCTL);
3348 if (uta_info->uta_in_use > 0)
3349 psrctl |= TXGBE_PSRCTL_UCHFENA;
3351 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3353 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3354 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3355 wr32(hw, TXGBE_PSRCTL, psrctl);
3361 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3363 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3364 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3368 /* The UTA table only exists on PF hardware */
3369 if (hw->mac.type < txgbe_mac_raptor)
3373 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3374 uta_info->uta_shadow[i] = ~0;
3375 wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3378 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3379 uta_info->uta_shadow[i] = 0;
3380 wr32(hw, TXGBE_UCADDRTBL(i), 0);
3384 psrctl = rd32(hw, TXGBE_PSRCTL);
3386 psrctl |= TXGBE_PSRCTL_UCHFENA;
3388 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3390 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3391 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3392 wr32(hw, TXGBE_PSRCTL, psrctl);
3398 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3400 uint32_t new_val = orig_val;
3402 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3403 new_val |= TXGBE_POOLETHCTL_UTA;
3404 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3405 new_val |= TXGBE_POOLETHCTL_MCHA;
3406 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3407 new_val |= TXGBE_POOLETHCTL_UCHA;
3408 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3409 new_val |= TXGBE_POOLETHCTL_BCA;
3410 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3411 new_val |= TXGBE_POOLETHCTL_MCP;
3417 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3419 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3420 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3422 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3424 if (queue_id < 32) {
3425 mask = rd32(hw, TXGBE_IMS(0));
3426 mask &= (1 << queue_id);
3427 wr32(hw, TXGBE_IMS(0), mask);
3428 } else if (queue_id < 64) {
3429 mask = rd32(hw, TXGBE_IMS(1));
3430 mask &= (1 << (queue_id - 32));
3431 wr32(hw, TXGBE_IMS(1), mask);
3433 rte_intr_enable(intr_handle);
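/*
 * Bit layout assumed by this enable/disable pair: queues 0..31 map to one
 * bit each in TXGBE_IMS(0) and queues 32..63 to TXGBE_IMS(1); e.g. queue
 * 40 corresponds to bit 40 - 32 == 8 of TXGBE_IMS(1).
 */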
3439 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3442 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3444 if (queue_id < 32) {
3445 mask = rd32(hw, TXGBE_IMS(0));
3446 mask &= ~(1 << queue_id);
3447 wr32(hw, TXGBE_IMS(0), mask);
3448 } else if (queue_id < 64) {
3449 mask = rd32(hw, TXGBE_IMS(1));
3450 mask &= ~(1 << (queue_id - 32));
3451 wr32(hw, TXGBE_IMS(1), mask);
3458 * set the IVAR registers, mapping interrupt causes to vectors
3460 * pointer to txgbe_hw struct
3462 * 0 for Rx, 1 for Tx, -1 for other causes
3464 * queue to map the corresponding interrupt to
3466 * the vector to map to the corresponding queue
3469 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3470 uint8_t queue, uint8_t msix_vector)
3474 if (direction == -1) {
3476 msix_vector |= TXGBE_IVARMISC_VLD;
3478 tmp = rd32(hw, TXGBE_IVARMISC);
3479 tmp &= ~(0xFF << idx);
3480 tmp |= (msix_vector << idx);
3481 wr32(hw, TXGBE_IVARMISC, tmp);
3483 /* rx or tx causes */
3484 /* Workaround for lost ICR */
3485 idx = ((16 * (queue & 1)) + (8 * direction));
3486 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3487 tmp &= ~(0xFF << idx);
3488 tmp |= (msix_vector << idx);
3489 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
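/*
 * IVAR layout sketch for the mapping above: each 32-bit TXGBE_IVAR(n)
 * register covers the queue pair 2n and 2n + 1, one byte per cause:
 *   bits  7:0   Rx cause, queue 2n      (idx == 0)
 *   bits 15:8   Tx cause, queue 2n      (idx == 8)
 *   bits 23:16  Rx cause, queue 2n + 1  (idx == 16)
 *   bits 31:24  Tx cause, queue 2n + 1  (idx == 24)
 * e.g. mapping Rx queue 5 to vector 3 rewrites bits 23:16 of
 * TXGBE_IVAR(2). Misc causes go through TXGBE_IVARMISC instead.
 */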
3494 * Sets up the hardware to properly generate MSI-X interrupts
3496 * board private structure
3499 txgbe_configure_msix(struct rte_eth_dev *dev)
3501 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3502 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3503 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3504 uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3505 uint32_t vec = TXGBE_MISC_VEC_ID;
3508 /* won't configure the MSI-X register if no mapping is done
3509 * between the intr vector and the event fd,
3510 * but if MSI-X has been enabled already, we need to configure
3511 * auto clean, auto mask and throttling.
3513 gpie = rd32(hw, TXGBE_GPIE);
3514 if (!rte_intr_dp_is_en(intr_handle) &&
3515 !(gpie & TXGBE_GPIE_MSIX))
3518 if (rte_intr_allow_others(intr_handle)) {
3519 base = TXGBE_RX_VEC_START;
3523 /* setup GPIE for MSI-x mode */
3524 gpie = rd32(hw, TXGBE_GPIE);
3525 gpie |= TXGBE_GPIE_MSIX;
3526 wr32(hw, TXGBE_GPIE, gpie);
3528 /* Populate the IVAR table and set the ITR values to the
3529 * corresponding register.
3531 if (rte_intr_dp_is_en(intr_handle)) {
3532 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3534 /* by default, 1:1 mapping */
3535 txgbe_set_ivar_map(hw, 0, queue_id, vec);
3536 intr_handle->intr_vec[queue_id] = vec;
3537 if (vec < base + intr_handle->nb_efd - 1)
3541 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3543 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3544 TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3549 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3550 uint16_t queue_idx, uint16_t tx_rate)
3552 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3555 if (queue_idx >= hw->mac.max_tx_queues)
3559 bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3560 bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3566 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3567 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3569 wr32(hw, TXGBE_ARBTXMMW, 0x14);
3571 /* Set ARBTXRATE of queue X */
3572 wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3573 wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
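/*
 * Rate-limit example (the rate is made up for illustration): limiting Tx
 * queue 3 to 1000 Mbps selects the queue via TXGBE_ARBPOOLIDX = 3 and
 * programs an ARBTXRATE word whose max field is 1000 and whose min field
 * is 500 (tx_rate / 2), per the macros above.
 */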
3580 txgbe_syn_filter_set(struct rte_eth_dev *dev,
3581 struct rte_eth_syn_filter *filter,
3584 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3585 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3589 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3592 syn_info = filter_info->syn_info;
3595 if (syn_info & TXGBE_SYNCLS_ENA)
3597 synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3598 synqf |= TXGBE_SYNCLS_ENA;
3600 if (filter->hig_pri)
3601 synqf |= TXGBE_SYNCLS_HIPRIO;
3603 synqf &= ~TXGBE_SYNCLS_HIPRIO;
3605 synqf = rd32(hw, TXGBE_SYNCLS);
3606 if (!(syn_info & TXGBE_SYNCLS_ENA))
3608 synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3611 filter_info->syn_info = synqf;
3612 wr32(hw, TXGBE_SYNCLS, synqf);
3617 static inline enum txgbe_5tuple_protocol
3618 convert_protocol_type(uint8_t protocol_value)
3620 if (protocol_value == IPPROTO_TCP)
3621 return TXGBE_5TF_PROT_TCP;
3622 else if (protocol_value == IPPROTO_UDP)
3623 return TXGBE_5TF_PROT_UDP;
3624 else if (protocol_value == IPPROTO_SCTP)
3625 return TXGBE_5TF_PROT_SCTP;
3627 return TXGBE_5TF_PROT_NONE;
3630 /* inject a 5-tuple filter into HW */
3632 txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
3633 struct txgbe_5tuple_filter *filter)
3635 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3637 uint32_t ftqf, sdpqf;
3638 uint32_t l34timir = 0;
3639 uint32_t mask = TXGBE_5TFCTL0_MASK;
3642 sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
3643 sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
3645 ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
3646 ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
3647 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
3648 mask &= ~TXGBE_5TFCTL0_MSADDR;
3649 if (filter->filter_info.dst_ip_mask == 0)
3650 mask &= ~TXGBE_5TFCTL0_MDADDR;
3651 if (filter->filter_info.src_port_mask == 0)
3652 mask &= ~TXGBE_5TFCTL0_MSPORT;
3653 if (filter->filter_info.dst_port_mask == 0)
3654 mask &= ~TXGBE_5TFCTL0_MDPORT;
3655 if (filter->filter_info.proto_mask == 0)
3656 mask &= ~TXGBE_5TFCTL0_MPROTO;
3658 ftqf |= TXGBE_5TFCTL0_MPOOL;
3659 ftqf |= TXGBE_5TFCTL0_ENA;
3661 wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
3662 wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
3663 wr32(hw, TXGBE_5TFPORT(i), sdpqf);
3664 wr32(hw, TXGBE_5TFCTL0(i), ftqf);
3666 l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
3667 wr32(hw, TXGBE_5TFCTL1(i), l34timir);
3671 * add a 5tuple filter
3674 * dev: Pointer to struct rte_eth_dev.
3675 * index: the index at which the filter is allocated.
3676 * filter: pointer to the filter that will be added.
3677 * rx_queue: the queue id the filter is assigned to.
3680 * - On success, zero.
3681 * - On failure, a negative value.
3684 txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3685 struct txgbe_5tuple_filter *filter)
3687 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3691 * look for an unused 5tuple filter index,
3692 * and insert the filter into the list.
3694 for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
3695 idx = i / (sizeof(uint32_t) * NBBY);
3696 shift = i % (sizeof(uint32_t) * NBBY);
3697 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3698 filter_info->fivetuple_mask[idx] |= 1 << shift;
3700 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3706 if (i >= TXGBE_MAX_FTQF_FILTERS) {
3707 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3711 txgbe_inject_5tuple_filter(dev, filter);
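/*
 * Index-bitmap sketch for the allocation loop above: filter slot i is
 * tracked by bit i % 32 of word i / 32 in fivetuple_mask (NBBY == 8 bits
 * per byte), so slot 37, for example, is bit 5 of word 1. The removal
 * path below clears the same bit with identical arithmetic.
 */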
3717 * remove a 5tuple filter
3720 * dev: Pointer to struct rte_eth_dev.
3721 * filter: the pointer of the filter to be removed.
3724 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
3725 struct txgbe_5tuple_filter *filter)
3727 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3728 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3729 uint16_t index = filter->index;
3731 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
3732 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
3733 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3736 wr32(hw, TXGBE_5TFDADDR(index), 0);
3737 wr32(hw, TXGBE_5TFSADDR(index), 0);
3738 wr32(hw, TXGBE_5TFPORT(index), 0);
3739 wr32(hw, TXGBE_5TFCTL0(index), 0);
3740 wr32(hw, TXGBE_5TFCTL1(index), 0);
3743 static inline struct txgbe_5tuple_filter *
3744 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
3745 struct txgbe_5tuple_filter_info *key)
3747 struct txgbe_5tuple_filter *it;
3749 TAILQ_FOREACH(it, filter_list, entries) {
3750 if (memcmp(key, &it->filter_info,
3751 sizeof(struct txgbe_5tuple_filter_info)) == 0) {
3758 /* translate elements in struct rte_eth_ntuple_filter
3759 * to struct txgbe_5tuple_filter_info
3762 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
3763 struct txgbe_5tuple_filter_info *filter_info)
3765 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
3766 filter->priority > TXGBE_5TUPLE_MAX_PRI ||
3767 filter->priority < TXGBE_5TUPLE_MIN_PRI)
3770 switch (filter->dst_ip_mask) {
3772 filter_info->dst_ip_mask = 0;
3773 filter_info->dst_ip = filter->dst_ip;
3776 filter_info->dst_ip_mask = 1;
3779 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3783 switch (filter->src_ip_mask) {
3785 filter_info->src_ip_mask = 0;
3786 filter_info->src_ip = filter->src_ip;
3789 filter_info->src_ip_mask = 1;
3792 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3796 switch (filter->dst_port_mask) {
3798 filter_info->dst_port_mask = 0;
3799 filter_info->dst_port = filter->dst_port;
3802 filter_info->dst_port_mask = 1;
3805 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3809 switch (filter->src_port_mask) {
3811 filter_info->src_port_mask = 0;
3812 filter_info->src_port = filter->src_port;
3815 filter_info->src_port_mask = 1;
3818 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3822 switch (filter->proto_mask) {
3824 filter_info->proto_mask = 0;
3825 filter_info->proto =
3826 convert_protocol_type(filter->proto);
3829 filter_info->proto_mask = 1;
3832 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3836 filter_info->priority = (uint8_t)filter->priority;
3841 * add or delete an ntuple filter
3844 * dev: Pointer to struct rte_eth_dev.
3845 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3846 * add: if true, add the filter; if false, remove it
3849 * - On success, zero.
3850 * - On failure, a negative value.
3853 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
3854 struct rte_eth_ntuple_filter *ntuple_filter,
3857 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3858 struct txgbe_5tuple_filter_info filter_5tuple;
3859 struct txgbe_5tuple_filter *filter;
3862 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
3863 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
3867 memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
3868 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
3872 filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
3874 if (filter != NULL && add) {
3875 PMD_DRV_LOG(ERR, "filter exists.");
3878 if (filter == NULL && !add) {
3879 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3884 filter = rte_zmalloc("txgbe_5tuple_filter",
3885 sizeof(struct txgbe_5tuple_filter), 0);
3888 rte_memcpy(&filter->filter_info,
3890 sizeof(struct txgbe_5tuple_filter_info));
3891 filter->queue = ntuple_filter->queue;
3892 ret = txgbe_add_5tuple_filter(dev, filter);
3898 txgbe_remove_5tuple_filter(dev, filter);
3905 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
3906 struct rte_eth_ethertype_filter *filter,
3909 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3910 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3914 struct txgbe_ethertype_filter ethertype_filter;
3916 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3919 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
3920 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
3921 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
3922 " ethertype filter.", filter->ether_type);
3926 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
3927 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
3930 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
3931 PMD_DRV_LOG(ERR, "drop option is unsupported.");
3935 ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
3936 if (ret >= 0 && add) {
3937 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
3938 filter->ether_type);
3941 if (ret < 0 && !add) {
3942 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3943 filter->ether_type);
3948 etqf = TXGBE_ETFLT_ENA;
3949 etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
3950 etqs |= TXGBE_ETCLS_QPID(filter->queue);
3951 etqs |= TXGBE_ETCLS_QENA;
3953 ethertype_filter.ethertype = filter->ether_type;
3954 ethertype_filter.etqf = etqf;
3955 ethertype_filter.etqs = etqs;
3956 ethertype_filter.conf = FALSE;
3957 ret = txgbe_ethertype_filter_insert(filter_info,
3960 PMD_DRV_LOG(ERR, "ethertype filters are full.");
3964 ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
3968 wr32(hw, TXGBE_ETFLT(ret), etqf);
3969 wr32(hw, TXGBE_ETCLS(ret), etqs);
3976 txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
3977 enum rte_filter_type filter_type,
3978 enum rte_filter_op filter_op,
3983 switch (filter_type) {
3984 case RTE_ETH_FILTER_GENERIC:
3985 if (filter_op != RTE_ETH_FILTER_GET)
3987 *(const void **)arg = &txgbe_flow_ops;
3990 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4000 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4001 u8 **mc_addr_ptr, u32 *vmdq)
4006 mc_addr = *mc_addr_ptr;
4007 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4012 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4013 struct rte_ether_addr *mc_addr_set,
4014 uint32_t nb_mc_addr)
4016 struct txgbe_hw *hw;
4019 hw = TXGBE_DEV_HW(dev);
4020 mc_addr_list = (u8 *)mc_addr_set;
4021 return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4022 txgbe_dev_addr_list_itr, TRUE);
4026 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4028 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4029 uint64_t systime_cycles;
4031 systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4032 systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4034 return systime_cycles;
4038 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4040 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4041 uint64_t rx_tstamp_cycles;
4043 /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
4044 rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4045 rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4047 return rx_tstamp_cycles;
4051 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4053 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4054 uint64_t tx_tstamp_cycles;
4056 /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
4057 tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4058 tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4060 return tx_tstamp_cycles;
4064 txgbe_start_timecounters(struct rte_eth_dev *dev)
4066 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4067 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4068 struct rte_eth_link link;
4069 uint32_t incval = 0;
4072 /* Get current link speed. */
4073 txgbe_dev_link_update(dev, 1);
4074 rte_eth_linkstatus_get(dev, &link);
4076 switch (link.link_speed) {
4077 case ETH_SPEED_NUM_100M:
4078 incval = TXGBE_INCVAL_100;
4079 shift = TXGBE_INCVAL_SHIFT_100;
4081 case ETH_SPEED_NUM_1G:
4082 incval = TXGBE_INCVAL_1GB;
4083 shift = TXGBE_INCVAL_SHIFT_1GB;
4085 case ETH_SPEED_NUM_10G:
4087 incval = TXGBE_INCVAL_10GB;
4088 shift = TXGBE_INCVAL_SHIFT_10GB;
4092 wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
4094 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4095 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4096 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4098 adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4099 adapter->systime_tc.cc_shift = shift;
4100 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4102 adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4103 adapter->rx_tstamp_tc.cc_shift = shift;
4104 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4106 adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4107 adapter->tx_tstamp_tc.cc_shift = shift;
4108 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
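/*
 * A sketch of the timecounter setup above, under the assumption that the
 * hardware adds incval to the TSTIME counter each clock cycle: software
 * then converts raw counter reads to nanoseconds as roughly
 * ns = cycle_delta >> cc_shift, with cc_mask bounding the counter width,
 * via rte_timecounter_update() in the timesync read paths below.
 */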
4112 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4114 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4116 adapter->systime_tc.nsec += delta;
4117 adapter->rx_tstamp_tc.nsec += delta;
4118 adapter->tx_tstamp_tc.nsec += delta;
4124 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4127 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4129 ns = rte_timespec_to_ns(ts);
4130 /* Set the timecounters to a new value. */
4131 adapter->systime_tc.nsec = ns;
4132 adapter->rx_tstamp_tc.nsec = ns;
4133 adapter->tx_tstamp_tc.nsec = ns;
4139 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4141 uint64_t ns, systime_cycles;
4142 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4144 systime_cycles = txgbe_read_systime_cyclecounter(dev);
4145 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4146 *ts = rte_ns_to_timespec(ns);
4152 txgbe_timesync_enable(struct rte_eth_dev *dev)
4154 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4157 /* Stop the timesync system time. */
4158 wr32(hw, TXGBE_TSTIMEINC, 0x0);
4159 /* Reset the timesync system time value. */
4160 wr32(hw, TXGBE_TSTIMEL, 0x0);
4161 wr32(hw, TXGBE_TSTIMEH, 0x0);
4163 txgbe_start_timecounters(dev);
4165 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4166 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4167 RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4169 /* Enable timestamping of received PTP packets. */
4170 tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4171 tsync_ctl |= TXGBE_TSRXCTL_ENA;
4172 wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4174 /* Enable timestamping of transmitted PTP packets. */
4175 tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4176 tsync_ctl |= TXGBE_TSTXCTL_ENA;
4177 wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4185 txgbe_timesync_disable(struct rte_eth_dev *dev)
4187 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4190 /* Disable timestamping of transmitted PTP packets. */
4191 tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4192 tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
4193 wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4195 /* Disable timestamping of received PTP packets. */
4196 tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4197 tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
4198 wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4200 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4201 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
4203 /* Stop incrementing the System Time registers. */
4204 wr32(hw, TXGBE_TSTIMEINC, 0);
4210 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4211 struct timespec *timestamp,
4212 uint32_t flags __rte_unused)
4214 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4215 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4216 uint32_t tsync_rxctl;
4217 uint64_t rx_tstamp_cycles;
4220 tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4221 if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4224 rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4225 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4226 *timestamp = rte_ns_to_timespec(ns);
4232 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4233 struct timespec *timestamp)
4235 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4236 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4237 uint32_t tsync_txctl;
4238 uint64_t tx_tstamp_cycles;
4241 tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4242 if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4245 tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4246 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4247 *timestamp = rte_ns_to_timespec(ns);
4253 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4257 const struct reg_info *reg_group;
4258 const struct reg_info **reg_set = txgbe_regs_others;
4260 while ((reg_group = reg_set[g_ind++]))
4261 count += txgbe_regs_group_count(reg_group);
4267 txgbe_get_regs(struct rte_eth_dev *dev,
4268 struct rte_dev_reg_info *regs)
4270 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4271 uint32_t *data = regs->data;
4274 const struct reg_info *reg_group;
4275 const struct reg_info **reg_set = txgbe_regs_others;
4278 regs->length = txgbe_get_reg_length(dev);
4279 regs->width = sizeof(uint32_t);
4283 /* Support only full register dump */
4284 if (regs->length == 0 ||
4285 regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
4286 regs->version = hw->mac.type << 24 |
4287 hw->revision_id << 16 |
4289 while ((reg_group = reg_set[g_ind++]))
4290 count += txgbe_read_regs_group(dev, &data[count],
4299 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
4301 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4303 /* Return unit is byte count */
4304 return hw->rom.word_size * 2;
4308 txgbe_get_eeprom(struct rte_eth_dev *dev,
4309 struct rte_dev_eeprom_info *in_eeprom)
4311 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4312 struct txgbe_rom_info *eeprom = &hw->rom;
4313 uint16_t *data = in_eeprom->data;
4316 first = in_eeprom->offset >> 1;
4317 length = in_eeprom->length >> 1;
4318 if (first > hw->rom.word_size ||
4319 ((first + length) > hw->rom.word_size))
4322 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4324 return eeprom->readw_buffer(hw, first, length, data);
4328 txgbe_set_eeprom(struct rte_eth_dev *dev,
4329 struct rte_dev_eeprom_info *in_eeprom)
4331 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4332 struct txgbe_rom_info *eeprom = &hw->rom;
4333 uint16_t *data = in_eeprom->data;
4336 first = in_eeprom->offset >> 1;
4337 length = in_eeprom->length >> 1;
4338 if (first > hw->rom.word_size ||
4339 ((first + length) > hw->rom.word_size))
4342 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4344 return eeprom->writew_buffer(hw, first, length, data);
4348 txgbe_get_module_info(struct rte_eth_dev *dev,
4349 struct rte_eth_dev_module_info *modinfo)
4351 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4353 uint8_t sff8472_rev, addr_mode;
4354 bool page_swap = false;
4356 /* Check whether we support SFF-8472 or not */
4357 status = hw->phy.read_i2c_eeprom(hw,
4358 TXGBE_SFF_SFF_8472_COMP,
4363 /* addressing mode is not supported */
4364 status = hw->phy.read_i2c_eeprom(hw,
4365 TXGBE_SFF_SFF_8472_SWAP,
4370 if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
4372 "Address change required to access page 0xA2, "
4373 "but not supported. Please report the module "
4374 "type to the driver maintainers.");
4378 if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
4379 /* We have an SFP, but it does not support SFF-8472 */
4380 modinfo->type = RTE_ETH_MODULE_SFF_8079;
4381 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
4383 /* We have an SFP which supports a revision of SFF-8472. */
4384 modinfo->type = RTE_ETH_MODULE_SFF_8472;
4385 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
4392 txgbe_get_module_eeprom(struct rte_eth_dev *dev,
4393 struct rte_dev_eeprom_info *info)
4395 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4396 uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
4397 uint8_t databyte = 0xFF;
4398 uint8_t *data = info->data;
4401 if (info->length == 0)
4404 for (i = info->offset; i < info->offset + info->length; i++) {
4405 if (i < RTE_ETH_MODULE_SFF_8079_LEN)
4406 status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
4408 status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
4413 data[i - info->offset] = databyte;
4420 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
4423 case txgbe_mac_raptor:
4431 txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
4432 struct rte_eth_dcb_info *dcb_info)
4434 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
4435 struct txgbe_dcb_tc_config *tc;
4436 struct rte_eth_dcb_tc_queue_mapping *tc_queue;
4440 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
4441 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
4443 dcb_info->nb_tcs = 1;
4445 tc_queue = &dcb_info->tc_queue;
4446 nb_tcs = dcb_info->nb_tcs;
4448 if (dcb_config->vt_mode) { /* vt is enabled */
4449 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
4450 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
4451 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4452 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
4453 if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
4454 for (j = 0; j < nb_tcs; j++) {
4455 tc_queue->tc_rxq[0][j].base = j;
4456 tc_queue->tc_rxq[0][j].nb_queue = 1;
4457 tc_queue->tc_txq[0][j].base = j;
4458 tc_queue->tc_txq[0][j].nb_queue = 1;
4461 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
4462 for (j = 0; j < nb_tcs; j++) {
4463 tc_queue->tc_rxq[i][j].base =
4465 tc_queue->tc_rxq[i][j].nb_queue = 1;
4466 tc_queue->tc_txq[i][j].base =
4468 tc_queue->tc_txq[i][j].nb_queue = 1;
4472 } else { /* vt is disabled */
4473 struct rte_eth_dcb_rx_conf *rx_conf =
4474 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
4475 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4476 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
4477 if (dcb_info->nb_tcs == ETH_4_TCS) {
4478 for (i = 0; i < dcb_info->nb_tcs; i++) {
4479 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
4480 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4482 dcb_info->tc_queue.tc_txq[0][0].base = 0;
4483 dcb_info->tc_queue.tc_txq[0][1].base = 64;
4484 dcb_info->tc_queue.tc_txq[0][2].base = 96;
4485 dcb_info->tc_queue.tc_txq[0][3].base = 112;
4486 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
4487 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4488 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4489 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4490 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
4491 for (i = 0; i < dcb_info->nb_tcs; i++) {
4492 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
4493 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4495 dcb_info->tc_queue.tc_txq[0][0].base = 0;
4496 dcb_info->tc_queue.tc_txq[0][1].base = 32;
4497 dcb_info->tc_queue.tc_txq[0][2].base = 64;
4498 dcb_info->tc_queue.tc_txq[0][3].base = 80;
4499 dcb_info->tc_queue.tc_txq[0][4].base = 96;
4500 dcb_info->tc_queue.tc_txq[0][5].base = 104;
4501 dcb_info->tc_queue.tc_txq[0][6].base = 112;
4502 dcb_info->tc_queue.tc_txq[0][7].base = 120;
4503 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
4504 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4505 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4506 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4507 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
4508 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
4509 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
4510 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
4513 for (i = 0; i < dcb_info->nb_tcs; i++) {
4514 tc = &dcb_config->tc_config[i];
4515 dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
/* restore n-tuple filter */
static inline void
txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	struct txgbe_5tuple_filter *node;

	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
		txgbe_inject_5tuple_filter(dev, node);
	}
}

/* restore ethernet type filter */
static inline void
txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			wr32(hw, TXGBE_ETFLT(i),
			     filter_info->ethertype_filters[i].etqf);
			wr32(hw, TXGBE_ETCLS(i),
			     filter_info->ethertype_filters[i].etqs);
			txgbe_flush(hw);
		}
	}
}

/* restore SYN filter */
static inline void
txgbe_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & TXGBE_SYNCLS_ENA) {
		wr32(hw, TXGBE_SYNCLS, synqf);
		txgbe_flush(hw);
	}
}

static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
	txgbe_ntuple_filter_restore(dev);
	txgbe_ethertype_filter_restore(dev);
	txgbe_syn_filter_restore(dev);

	return 0;
}

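/*
 * Hook the PMD callbacks into the generic ethdev API. Every rte_eth_*
 * call on a txgbe port dispatches through this table; illustrative
 * application-side usage (not part of this driver):
 *
 *	struct rte_eth_conf conf = { 0 };
 *	// resolves to txgbe_dev_configure() via .dev_configure
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */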
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure = txgbe_dev_configure,
	.dev_infos_get = txgbe_dev_info_get,
	.dev_start = txgbe_dev_start,
	.dev_stop = txgbe_dev_stop,
	.dev_set_link_up = txgbe_dev_set_link_up,
	.dev_set_link_down = txgbe_dev_set_link_down,
	.dev_close = txgbe_dev_close,
	.dev_reset = txgbe_dev_reset,
	.promiscuous_enable = txgbe_dev_promiscuous_enable,
	.promiscuous_disable = txgbe_dev_promiscuous_disable,
	.allmulticast_enable = txgbe_dev_allmulticast_enable,
	.allmulticast_disable = txgbe_dev_allmulticast_disable,
	.link_update = txgbe_dev_link_update,
	.stats_get = txgbe_dev_stats_get,
	.xstats_get = txgbe_dev_xstats_get,
	.xstats_get_by_id = txgbe_dev_xstats_get_by_id,
	.stats_reset = txgbe_dev_stats_reset,
	.xstats_reset = txgbe_dev_xstats_reset,
	.xstats_get_names = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set,
	.fw_version_get = txgbe_fw_version_get,
	.dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
	.mtu_set = txgbe_dev_mtu_set,
	.vlan_filter_set = txgbe_vlan_filter_set,
	.vlan_tpid_set = txgbe_vlan_tpid_set,
	.vlan_offload_set = txgbe_vlan_offload_set,
	.vlan_strip_queue_set = txgbe_vlan_strip_queue_set,
	.rx_queue_start = txgbe_dev_rx_queue_start,
	.rx_queue_stop = txgbe_dev_rx_queue_stop,
	.tx_queue_start = txgbe_dev_tx_queue_start,
	.tx_queue_stop = txgbe_dev_tx_queue_stop,
	.rx_queue_setup = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release = txgbe_dev_rx_queue_release,
	.tx_queue_setup = txgbe_dev_tx_queue_setup,
	.tx_queue_release = txgbe_dev_tx_queue_release,
	.dev_led_on = txgbe_dev_led_on,
	.dev_led_off = txgbe_dev_led_off,
	.flow_ctrl_get = txgbe_flow_ctrl_get,
	.flow_ctrl_set = txgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = txgbe_priority_flow_ctrl_set,
	.mac_addr_add = txgbe_add_rar,
	.mac_addr_remove = txgbe_remove_rar,
	.mac_addr_set = txgbe_set_default_mac_addr,
	.uc_hash_table_set = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
	.set_queue_rate_limit = txgbe_set_queue_rate_limit,
	.reta_update = txgbe_dev_rss_reta_update,
	.reta_query = txgbe_dev_rss_reta_query,
	.rss_hash_update = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
	.filter_ctrl = txgbe_dev_filter_ctrl,
	.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
	.rxq_info_get = txgbe_rxq_info_get,
	.txq_info_get = txgbe_txq_info_get,
	.timesync_enable = txgbe_timesync_enable,
	.timesync_disable = txgbe_timesync_disable,
	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
	.get_reg = txgbe_get_regs,
	.get_eeprom_length = txgbe_get_eeprom_length,
	.get_eeprom = txgbe_get_eeprom,
	.set_eeprom = txgbe_set_eeprom,
	.get_module_info = txgbe_get_module_info,
	.get_module_eeprom = txgbe_get_module_eeprom,
	.get_dcb_info = txgbe_dev_get_dcb_info,
	.timesync_adjust_time = txgbe_timesync_adjust_time,
	.timesync_read_time = txgbe_timesync_read_time,
	.timesync_write_time = txgbe_timesync_write_time,
	.tx_done_cleanup = txgbe_dev_tx_done_cleanup,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

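/*
 * The log types registered below can be raised at run time through the
 * EAL, e.g. (illustrative command line):
 *   --log-level=pmd.net.txgbe.init:debug
 */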
RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
	RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
	RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
	RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif