1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
10 #include <rte_common.h>
11 #include <ethdev_pci.h>
13 #include <rte_interrupts.h>
15 #include <rte_debug.h>
17 #include <rte_memory.h>
19 #include <rte_alarm.h>
20 #include <rte_kvargs.h>
22 #include "txgbe_logs.h"
23 #include "base/txgbe.h"
24 #include "txgbe_ethdev.h"
25 #include "txgbe_rxtx.h"
26 #include "txgbe_regs_group.h"
28 static const struct reg_info txgbe_regs_general[] = {
29 {TXGBE_RST, 1, 1, "TXGBE_RST"},
30 {TXGBE_STAT, 1, 1, "TXGBE_STAT"},
31 {TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
32 {TXGBE_SDP, 1, 1, "TXGBE_SDP"},
33 {TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
34 {TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
38 static const struct reg_info txgbe_regs_nvm[] = {
42 static const struct reg_info txgbe_regs_interrupt[] = {
46 static const struct reg_info txgbe_regs_fctl_others[] = {
50 static const struct reg_info txgbe_regs_rxdma[] = {
54 static const struct reg_info txgbe_regs_rx[] = {
58 static const struct reg_info txgbe_regs_tx[] = {
62 static const struct reg_info txgbe_regs_wakeup[] = {
66 static const struct reg_info txgbe_regs_dcb[] = {
70 static const struct reg_info txgbe_regs_mac[] = {
74 static const struct reg_info txgbe_regs_diagnostic[] = {
79 static const struct reg_info *txgbe_regs_others[] = {
83 txgbe_regs_fctl_others,
90 txgbe_regs_diagnostic,
93 static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
94 static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
95 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
96 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
97 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
98 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
99 static int txgbe_dev_close(struct rte_eth_dev *dev);
100 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
101 int wait_to_complete);
102 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
103 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
104 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
107 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
108 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
109 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
110 static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
111 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
112 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
113 struct rte_intr_handle *handle);
114 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
115 struct rte_intr_handle *handle);
116 static void txgbe_dev_interrupt_handler(void *param);
117 static void txgbe_dev_interrupt_delayed_handler(void *param);
118 static void txgbe_configure_msix(struct rte_eth_dev *dev);
120 static int txgbe_filter_restore(struct rte_eth_dev *dev);
121 static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
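/*
 * The macros below track per-queue VLAN-strip state in a word-addressed
 * bitmap: queue q lives in word q / 32 at bit q % 32 (assuming 32-bit
 * bitmap words and NBBY == 8). Worked example for q = 37:
 *   idx = 37 / 32 = 1, bit = 37 % 32 = 5,
 * so TXGBE_SET_HWSTRIP() sets bit 5 of bitmap[1].
 */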
123 #define TXGBE_SET_HWSTRIP(h, q) do {\
124 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
125 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
126 (h)->bitmap[idx] |= 1U << bit;\
129 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
130 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
131 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
132 (h)->bitmap[idx] &= ~(1U << bit);\
135 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
136 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
137 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
138 (r) = ((h)->bitmap[idx] >> bit) & 1;\
142 * The set of PCI devices this driver supports
144 static const struct rte_pci_id pci_id_txgbe_map[] = {
145 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000) },
146 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820) },
147 { .vendor_id = 0, /* sentinel */ },
150 static const struct rte_eth_desc_lim rx_desc_lim = {
151 .nb_max = TXGBE_RING_DESC_MAX,
152 .nb_min = TXGBE_RING_DESC_MIN,
153 .nb_align = TXGBE_RXD_ALIGN,
156 static const struct rte_eth_desc_lim tx_desc_lim = {
157 .nb_max = TXGBE_RING_DESC_MAX,
158 .nb_min = TXGBE_RING_DESC_MIN,
159 .nb_align = TXGBE_TXD_ALIGN,
160 .nb_seg_max = TXGBE_TX_MAX_SEG,
161 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
164 static const struct eth_dev_ops txgbe_eth_dev_ops;
166 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
167 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
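/*
 * Illustration: HW_XSTAT(rx_packets) expands to
 *   {"rx_packets", offsetof(struct txgbe_hw_stats, rx_packets)},
 * i.e. each entry pairs an xstats name with the byte offset of the
 * matching counter inside struct txgbe_hw_stats.
 */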
168 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
170 HW_XSTAT(mng_bmc2host_packets),
171 HW_XSTAT(mng_host2bmc_packets),
173 HW_XSTAT(rx_packets),
174 HW_XSTAT(tx_packets),
177 HW_XSTAT(rx_total_bytes),
178 HW_XSTAT(rx_total_packets),
179 HW_XSTAT(tx_total_packets),
180 HW_XSTAT(rx_total_missed_packets),
181 HW_XSTAT(rx_broadcast_packets),
182 HW_XSTAT(rx_multicast_packets),
183 HW_XSTAT(rx_management_packets),
184 HW_XSTAT(tx_management_packets),
185 HW_XSTAT(rx_management_dropped),
188 HW_XSTAT(rx_crc_errors),
189 HW_XSTAT(rx_illegal_byte_errors),
190 HW_XSTAT(rx_error_bytes),
191 HW_XSTAT(rx_mac_short_packet_dropped),
192 HW_XSTAT(rx_length_errors),
193 HW_XSTAT(rx_undersize_errors),
194 HW_XSTAT(rx_fragment_errors),
195 HW_XSTAT(rx_oversize_errors),
196 HW_XSTAT(rx_jabber_errors),
197 HW_XSTAT(rx_l3_l4_xsum_error),
198 HW_XSTAT(mac_local_errors),
199 HW_XSTAT(mac_remote_errors),
202 HW_XSTAT(flow_director_added_filters),
203 HW_XSTAT(flow_director_removed_filters),
204 HW_XSTAT(flow_director_filter_add_errors),
205 HW_XSTAT(flow_director_filter_remove_errors),
206 HW_XSTAT(flow_director_matched_filters),
207 HW_XSTAT(flow_director_missed_filters),
210 HW_XSTAT(rx_fcoe_crc_errors),
211 HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
212 HW_XSTAT(rx_fcoe_dropped),
213 HW_XSTAT(rx_fcoe_packets),
214 HW_XSTAT(tx_fcoe_packets),
215 HW_XSTAT(rx_fcoe_bytes),
216 HW_XSTAT(tx_fcoe_bytes),
217 HW_XSTAT(rx_fcoe_no_ddp),
218 HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
221 HW_XSTAT(tx_macsec_pkts_untagged),
222 HW_XSTAT(tx_macsec_pkts_encrypted),
223 HW_XSTAT(tx_macsec_pkts_protected),
224 HW_XSTAT(tx_macsec_octets_encrypted),
225 HW_XSTAT(tx_macsec_octets_protected),
226 HW_XSTAT(rx_macsec_pkts_untagged),
227 HW_XSTAT(rx_macsec_pkts_badtag),
228 HW_XSTAT(rx_macsec_pkts_nosci),
229 HW_XSTAT(rx_macsec_pkts_unknownsci),
230 HW_XSTAT(rx_macsec_octets_decrypted),
231 HW_XSTAT(rx_macsec_octets_validated),
232 HW_XSTAT(rx_macsec_sc_pkts_unchecked),
233 HW_XSTAT(rx_macsec_sc_pkts_delayed),
234 HW_XSTAT(rx_macsec_sc_pkts_late),
235 HW_XSTAT(rx_macsec_sa_pkts_ok),
236 HW_XSTAT(rx_macsec_sa_pkts_invalid),
237 HW_XSTAT(rx_macsec_sa_pkts_notvalid),
238 HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
239 HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
242 HW_XSTAT(rx_size_64_packets),
243 HW_XSTAT(rx_size_65_to_127_packets),
244 HW_XSTAT(rx_size_128_to_255_packets),
245 HW_XSTAT(rx_size_256_to_511_packets),
246 HW_XSTAT(rx_size_512_to_1023_packets),
247 HW_XSTAT(rx_size_1024_to_max_packets),
248 HW_XSTAT(tx_size_64_packets),
249 HW_XSTAT(tx_size_65_to_127_packets),
250 HW_XSTAT(tx_size_128_to_255_packets),
251 HW_XSTAT(tx_size_256_to_511_packets),
252 HW_XSTAT(tx_size_512_to_1023_packets),
253 HW_XSTAT(tx_size_1024_to_max_packets),
256 HW_XSTAT(tx_xon_packets),
257 HW_XSTAT(rx_xon_packets),
258 HW_XSTAT(tx_xoff_packets),
259 HW_XSTAT(rx_xoff_packets),
261 HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
262 HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
263 HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
264 HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
267 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
268 sizeof(rte_txgbe_stats_strings[0]))
270 /* Per-priority statistics */
271 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
272 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
273 UP_XSTAT(rx_up_packets),
274 UP_XSTAT(tx_up_packets),
275 UP_XSTAT(rx_up_bytes),
276 UP_XSTAT(tx_up_bytes),
277 UP_XSTAT(rx_up_drop_packets),
279 UP_XSTAT(tx_up_xon_packets),
280 UP_XSTAT(rx_up_xon_packets),
281 UP_XSTAT(tx_up_xoff_packets),
282 UP_XSTAT(rx_up_xoff_packets),
283 UP_XSTAT(rx_up_dropped),
284 UP_XSTAT(rx_up_mbuf_alloc_errors),
285 UP_XSTAT(tx_up_xon2off_packets),
288 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
289 sizeof(rte_txgbe_up_strings[0]))
291 /* Per-queue statistics */
292 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
293 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
294 QP_XSTAT(rx_qp_packets),
295 QP_XSTAT(tx_qp_packets),
296 QP_XSTAT(rx_qp_bytes),
297 QP_XSTAT(tx_qp_bytes),
298 QP_XSTAT(rx_qp_mc_packets),
301 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
302 sizeof(rte_txgbe_qp_strings[0]))
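/*
 * Note: UP_XSTAT()/QP_XSTAT() record the offset of element 0 only; the
 * xstats reader is expected to step that offset by i * sizeof(up[0])
 * (resp. qp[0]) for instance i. That convention is assumed here and is
 * not shown in this excerpt.
 */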
305 txgbe_is_sfp(struct txgbe_hw *hw)
307 switch (hw->phy.type) {
308 case txgbe_phy_sfp_avago:
309 case txgbe_phy_sfp_ftl:
310 case txgbe_phy_sfp_intel:
311 case txgbe_phy_sfp_unknown:
312 case txgbe_phy_sfp_tyco_passive:
313 case txgbe_phy_sfp_unknown_passive:
320 static inline int32_t
321 txgbe_pf_reset_hw(struct txgbe_hw *hw)
326 status = hw->mac.reset_hw(hw);
328 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
329 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
330 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
331 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
334 if (status == TXGBE_ERR_SFP_NOT_PRESENT)
340 txgbe_enable_intr(struct rte_eth_dev *dev)
342 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
343 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
345 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
346 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
347 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
352 txgbe_disable_intr(struct txgbe_hw *hw)
354 PMD_INIT_FUNC_TRACE();
356 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
357 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
358 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
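/*
 * Register-naming note, inferred from the enable/disable pairing above:
 * writing a bit to TXGBE_IMC(n) clears the mask, i.e. enables the
 * vector, while writing the same bit to TXGBE_IMS(n) sets the mask and
 * disables it; TXGBE_IENMISC gates the miscellaneous causes.
 */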
363 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
368 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
369 struct txgbe_stat_mappings *stat_mappings =
370 TXGBE_DEV_STAT_MAPPINGS(eth_dev);
371 uint32_t qsmr_mask = 0;
372 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
376 if (hw->mac.type != txgbe_mac_raptor)
379 if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
382 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
383 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
386 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
387 if (n >= TXGBE_NB_STAT_MAPPING) {
388 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
391 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
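/*
 * Worked example (assuming NB_QMAP_FIELDS_PER_QSM_REG == 4 and
 * QSM_REG_NB_BITS_PER_QMAP_FIELD == 8): queue_id 5 maps to register
 * n = 5 / 4 = 1 with field offset 5 % 4 = 1, so its stat index
 * occupies bits 15:8 of RQSMR[1] (Rx) or TQSM[1] (Tx).
 */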
393 /* Now clear any previous stat_idx set */
394 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
396 stat_mappings->tqsm[n] &= ~clearing_mask;
398 stat_mappings->rqsm[n] &= ~clearing_mask;
400 q_map = (uint32_t)stat_idx;
401 q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
402 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
404 stat_mappings->tqsm[n] |= qsmr_mask;
406 stat_mappings->rqsm[n] |= qsmr_mask;
408 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
409 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
411 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
412 is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
417 txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
421 struct txgbe_dcb_tc_config *tc;
423 UNREFERENCED_PARAMETER(hw);
425 dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
426 dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
427 bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
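/*
 * With TXGBE_DCB_TC_MAX == 8 (assumed here), bwgp = 100 / 8 = 12 and
 * the "+ (i & 1)" below hands the remainder to the odd TCs: four TCs
 * get 12% and four get 13%, summing to exactly 100%.
 */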
428 for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
429 tc = &dcb_config->tc_config[i];
430 tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
431 tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
432 tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
433 tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
434 tc->pfc = txgbe_dcb_pfc_disabled;
437 /* Initialize default user priority to TC mapping, UPx->TC0 */
438 tc = &dcb_config->tc_config[0];
439 tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
440 tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
441 for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
442 dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
443 dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
445 dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
446 dcb_config->pfc_mode_enable = false;
447 dcb_config->vt_mode = true;
448 dcb_config->round_robin_enable = false;
449 /* support all DCB capabilities */
450 dcb_config->support.capabilities = 0xFF;
454 * Ensure that all locks are released before first NVM or PHY access
457 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
462 * These ones are trickier since they are common to all ports; but
463 * swfw_sync retries long enough (1s) to be almost sure that if the
464 * lock cannot be taken, it is due to an improper lock of the semaphore.
467 mask = TXGBE_MNGSEM_SWPHY |
469 TXGBE_MNGSEM_SWFLASH;
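/*
 * Acquire (or time out after the 1s retry window) and then release
 * unconditionally: this forces the shared semaphore back to a released
 * state even if a previous owner died while holding it.
 */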
470 if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
471 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
473 hw->mac.release_swfw_sync(hw, mask);
477 txgbe_handle_devarg(__rte_unused const char *key, const char *value,
480 uint16_t *n = extra_args;
482 if (value == NULL || extra_args == NULL)
485 *n = (uint16_t)strtoul(value, NULL, 10);
486 if (*n == USHRT_MAX && errno == ERANGE)
493 txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
495 struct rte_kvargs *kvlist;
508 kvlist = rte_kvargs_parse(devargs->args, txgbe_valid_arguments);
512 rte_kvargs_process(kvlist, TXGBE_DEVARG_BP_AUTO,
513 &txgbe_handle_devarg, &auto_neg);
514 rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_POLL,
515 &txgbe_handle_devarg, &poll);
516 rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_PRESENT,
517 &txgbe_handle_devarg, &present);
518 rte_kvargs_process(kvlist, TXGBE_DEVARG_KX_SGMII,
519 &txgbe_handle_devarg, &sgmii);
520 rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_SET,
521 &txgbe_handle_devarg, &ffe_set);
522 rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_MAIN,
523 &txgbe_handle_devarg, &ffe_main);
524 rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_PRE,
525 &txgbe_handle_devarg, &ffe_pre);
526 rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST,
527 &txgbe_handle_devarg, &ffe_post);
528 rte_kvargs_free(kvlist);
531 hw->devarg.auto_neg = auto_neg;
532 hw->devarg.poll = poll;
533 hw->devarg.present = present;
534 hw->devarg.sgmii = sgmii;
535 hw->phy.ffe_set = ffe_set;
536 hw->phy.ffe_main = ffe_main;
537 hw->phy.ffe_pre = ffe_pre;
538 hw->phy.ffe_post = ffe_post;
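/*
 * Example (assuming the usual lowercase devarg keys, e.g.
 * TXGBE_DEVARG_BP_AUTO == "auto_neg"): the backplane tuning above can
 * be supplied on the EAL command line as
 *   -a 0000:01:00.0,auto_neg=1,poll=0,ffe_set=1
 * with each value parsed by txgbe_handle_devarg() via strtoul().
 */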
542 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
544 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
545 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
546 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
547 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
548 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
549 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
550 struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
551 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
552 const struct rte_memzone *mz;
557 PMD_INIT_FUNC_TRACE();
559 eth_dev->dev_ops = &txgbe_eth_dev_ops;
560 eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
561 eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
562 eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
563 eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
564 eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
565 eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
568 * For secondary processes, we don't initialise any further as primary
569 * has already done this work. Only check we don't need a different
570 * RX and TX function.
572 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
573 struct txgbe_tx_queue *txq;
574 /* TX queue function in primary, set by last queue initialized
575 * Tx queue may not have been initialized by the primary process
577 if (eth_dev->data->tx_queues) {
578 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
579 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
580 txgbe_set_tx_function(eth_dev, txq);
582 /* Use default TX function if we get here */
583 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
584 "Using default TX function.");
587 txgbe_set_rx_function(eth_dev);
592 rte_eth_copy_pci_info(eth_dev, pci_dev);
594 /* Vendor and Device ID need to be set before init of shared code */
595 hw->device_id = pci_dev->id.device_id;
596 hw->vendor_id = pci_dev->id.vendor_id;
597 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
598 hw->allow_unsupported_sfp = 1;
600 /* Reserve memory for interrupt status block */
601 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
602 16, TXGBE_ALIGN, SOCKET_ID_ANY);
606 hw->isb_dma = TMZ_PADDR(mz);
607 hw->isb_mem = TMZ_VADDR(mz);
609 txgbe_parse_devargs(hw, pci_dev->device.devargs);
610 /* Initialize the shared code (base driver) */
611 err = txgbe_init_shared_code(hw);
613 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
617 /* Unlock any pending hardware semaphore */
618 txgbe_swfw_lock_reset(hw);
620 #ifdef RTE_LIB_SECURITY
621 /* Initialize security_ctx only for primary process */
622 if (txgbe_ipsec_ctx_create(eth_dev))
626 /* Initialize DCB configuration */
627 memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
628 txgbe_dcb_init(hw, dcb_config);
630 /* Get Hardware Flow Control setting */
631 hw->fc.requested_mode = txgbe_fc_full;
632 hw->fc.current_mode = txgbe_fc_full;
633 hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
634 for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
635 hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
636 hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
640 err = hw->rom.init_params(hw);
642 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
646 /* Make sure we have a good EEPROM before we read from it */
647 err = hw->rom.validate_checksum(hw, &csum);
649 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
653 err = hw->mac.init_hw(hw);
656 * Devices with copper phys will fail to initialise if txgbe_init_hw()
657 * is called too soon after the kernel driver unbinding/binding occurs.
658 * The failure occurs in txgbe_identify_phy() for all devices,
659 * but for non-copper devices, txgbe_identify_sfp_module() is
660 * also called. See txgbe_identify_phy(). The reason for the
661 * failure is not known, and only occurs when virtualisation features
662 * are disabled in the BIOS. A delay of 200ms was found to be enough by
663 * trial-and-error, and is doubled to be safe.
665 if (err && hw->phy.media_type == txgbe_media_type_copper) {
667 err = hw->mac.init_hw(hw);
670 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
673 if (err == TXGBE_ERR_EEPROM_VERSION) {
674 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
675 "LOM. Please be aware there may be issues associated "
676 "with your hardware.");
677 PMD_INIT_LOG(ERR, "If you are experiencing problems "
678 "please contact your hardware representative "
679 "who provided you with this hardware.");
680 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
681 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
684 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
688 /* Reset the hw statistics */
689 txgbe_dev_stats_reset(eth_dev);
691 /* disable interrupt */
692 txgbe_disable_intr(hw);
694 /* Allocate memory for storing MAC addresses */
695 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
696 hw->mac.num_rar_entries, 0);
697 if (eth_dev->data->mac_addrs == NULL) {
699 "Failed to allocate %u bytes needed to store "
701 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
705 /* Copy the permanent MAC address */
706 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
707 &eth_dev->data->mac_addrs[0]);
709 /* Allocate memory for storing hash filter MAC addresses */
710 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
711 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
712 if (eth_dev->data->hash_mac_addrs == NULL) {
714 "Failed to allocate %d bytes needed to store MAC addresses",
715 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
719 /* initialize the vfta */
720 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
722 /* initialize the hw strip bitmap*/
723 memset(hwstrip, 0, sizeof(*hwstrip));
725 /* initialize the PF if max_vfs is not zero */
726 ret = txgbe_pf_host_init(eth_dev);
728 rte_free(eth_dev->data->mac_addrs);
729 eth_dev->data->mac_addrs = NULL;
730 rte_free(eth_dev->data->hash_mac_addrs);
731 eth_dev->data->hash_mac_addrs = NULL;
735 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
736 /* let hardware know driver is loaded */
737 ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
738 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
739 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
740 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
743 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
744 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
745 (int)hw->mac.type, (int)hw->phy.type,
746 (int)hw->phy.sfp_type);
748 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
749 (int)hw->mac.type, (int)hw->phy.type);
751 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
752 eth_dev->data->port_id, pci_dev->id.vendor_id,
753 pci_dev->id.device_id);
755 rte_intr_callback_register(intr_handle,
756 txgbe_dev_interrupt_handler, eth_dev);
758 /* enable uio/vfio intr/eventfd mapping */
759 rte_intr_enable(intr_handle);
761 /* enable supported interrupts */
762 txgbe_enable_intr(eth_dev);
764 /* initialize filter info */
765 memset(filter_info, 0,
766 sizeof(struct txgbe_filter_info));
768 /* initialize 5tuple filter list */
769 TAILQ_INIT(&filter_info->fivetuple_list);
771 /* initialize flow director filter list & hash */
772 txgbe_fdir_filter_init(eth_dev);
774 /* initialize l2 tunnel filter list & hash */
775 txgbe_l2_tn_filter_init(eth_dev);
777 /* initialize flow filter lists */
778 txgbe_filterlist_init();
780 /* initialize bandwidth configuration info */
781 memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
783 /* initialize Traffic Manager configuration */
784 txgbe_tm_conf_init(eth_dev);
790 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
792 PMD_INIT_FUNC_TRACE();
794 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
797 txgbe_dev_close(eth_dev);
802 static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
804 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
805 struct txgbe_5tuple_filter *p_5tuple;
807 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
808 TAILQ_REMOVE(&filter_info->fivetuple_list,
813 memset(filter_info->fivetuple_mask, 0,
814 sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
819 static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
821 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
822 struct txgbe_fdir_filter *fdir_filter;
824 if (fdir_info->hash_map)
825 rte_free(fdir_info->hash_map);
826 if (fdir_info->hash_handle)
827 rte_hash_free(fdir_info->hash_handle);
829 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
830 TAILQ_REMOVE(&fdir_info->fdir_list,
833 rte_free(fdir_filter);
839 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
841 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
842 struct txgbe_l2_tn_filter *l2_tn_filter;
844 if (l2_tn_info->hash_map)
845 rte_free(l2_tn_info->hash_map);
846 if (l2_tn_info->hash_handle)
847 rte_hash_free(l2_tn_info->hash_handle);
849 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
850 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
853 rte_free(l2_tn_filter);
859 static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
861 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
862 char fdir_hash_name[RTE_HASH_NAMESIZE];
863 struct rte_hash_parameters fdir_hash_params = {
864 .name = fdir_hash_name,
865 .entries = TXGBE_MAX_FDIR_FILTER_NUM,
866 .key_len = sizeof(struct txgbe_atr_input),
867 .hash_func = rte_hash_crc,
868 .hash_func_init_val = 0,
869 .socket_id = rte_socket_id(),
872 TAILQ_INIT(&fdir_info->fdir_list);
873 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
874 "fdir_%s", TDEV_NAME(eth_dev));
875 fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
876 if (!fdir_info->hash_handle) {
877 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
880 fdir_info->hash_map = rte_zmalloc("txgbe",
881 sizeof(struct txgbe_fdir_filter *) *
882 TXGBE_MAX_FDIR_FILTER_NUM,
884 if (!fdir_info->hash_map) {
886 "Failed to allocate memory for fdir hash map!");
889 fdir_info->mask_added = FALSE;
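/*
 * Design note: flow director filters are tracked twice on purpose. The
 * TAILQ gives ordered iteration (e.g. for txgbe_filter_restore()), the
 * rte_hash gives O(1) lookup keyed on struct txgbe_atr_input, and
 * hash_map[] maps the hash's slot index back to the filter object (the
 * usual DPDK rte_hash pattern, assumed here).
 */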
894 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
896 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
897 char l2_tn_hash_name[RTE_HASH_NAMESIZE];
898 struct rte_hash_parameters l2_tn_hash_params = {
899 .name = l2_tn_hash_name,
900 .entries = TXGBE_MAX_L2_TN_FILTER_NUM,
901 .key_len = sizeof(struct txgbe_l2_tn_key),
902 .hash_func = rte_hash_crc,
903 .hash_func_init_val = 0,
904 .socket_id = rte_socket_id(),
907 TAILQ_INIT(&l2_tn_info->l2_tn_list);
908 snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
909 "l2_tn_%s", TDEV_NAME(eth_dev));
910 l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
911 if (!l2_tn_info->hash_handle) {
912 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
915 l2_tn_info->hash_map = rte_zmalloc("txgbe",
916 sizeof(struct txgbe_l2_tn_filter *) *
917 TXGBE_MAX_L2_TN_FILTER_NUM,
919 if (!l2_tn_info->hash_map) {
921 "Failed to allocate memory for L2 TN hash map!");
924 l2_tn_info->e_tag_en = FALSE;
925 l2_tn_info->e_tag_fwd_en = FALSE;
926 l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
932 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
933 struct rte_pci_device *pci_dev)
935 return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
936 sizeof(struct txgbe_adapter),
937 eth_dev_pci_specific_init, pci_dev,
938 eth_txgbe_dev_init, NULL);
941 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
943 struct rte_eth_dev *ethdev;
945 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
949 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
952 static struct rte_pci_driver rte_txgbe_pmd = {
953 .id_table = pci_id_txgbe_map,
954 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
955 RTE_PCI_DRV_INTR_LSC,
956 .probe = eth_txgbe_pci_probe,
957 .remove = eth_txgbe_pci_remove,
961 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
963 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
964 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
969 vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
970 vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
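/*
 * VFTA layout: bit (vlan_id & 0x1F) of 32-bit table word (vlan_id >> 5).
 * Worked example: vlan_id 100 -> vid_idx = 3, vid_bit = 1 << 4, i.e.
 * bit 4 of TXGBE_VLANTBL(3).
 */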
971 vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
976 wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
978 /* update local VFTA copy */
979 shadow_vfta->vfta[vid_idx] = vfta;
985 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
987 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
988 struct txgbe_rx_queue *rxq;
990 uint32_t rxcfg, rxbal, rxbah;
993 txgbe_vlan_hw_strip_enable(dev, queue);
995 txgbe_vlan_hw_strip_disable(dev, queue);
997 rxq = dev->data->rx_queues[queue];
998 rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
999 rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
1000 rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
1001 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
1002 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
1003 !(rxcfg & TXGBE_RXCFG_VLAN);
1004 rxcfg |= TXGBE_RXCFG_VLAN;
1006 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
1007 (rxcfg & TXGBE_RXCFG_VLAN);
1008 rxcfg &= ~TXGBE_RXCFG_VLAN;
1010 rxcfg &= ~TXGBE_RXCFG_ENA;
1013 /* set vlan strip for ring */
1014 txgbe_dev_rx_queue_stop(dev, queue);
1015 wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
1016 wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
1017 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
1018 txgbe_dev_rx_queue_start(dev, queue);
1023 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1024 enum rte_vlan_type vlan_type,
1027 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1029 uint32_t portctrl, vlan_ext, qinq;
1031 portctrl = rd32(hw, TXGBE_PORTCTL);
1033 vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
1034 qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
1035 switch (vlan_type) {
1036 case RTE_ETH_VLAN_TYPE_INNER:
1038 wr32m(hw, TXGBE_VLANCTL,
1039 TXGBE_VLANCTL_TPID_MASK,
1040 TXGBE_VLANCTL_TPID(tpid));
1041 wr32m(hw, TXGBE_DMATXCTRL,
1042 TXGBE_DMATXCTRL_TPID_MASK,
1043 TXGBE_DMATXCTRL_TPID(tpid));
1046 PMD_DRV_LOG(ERR, "Inner type is not supported"
1051 wr32m(hw, TXGBE_TAGTPID(0),
1052 TXGBE_TAGTPID_LSB_MASK,
1053 TXGBE_TAGTPID_LSB(tpid));
1056 case RTE_ETH_VLAN_TYPE_OUTER:
1058 /* Only the high 16 bits are valid */
1059 wr32m(hw, TXGBE_EXTAG,
1060 TXGBE_EXTAG_VLAN_MASK,
1061 TXGBE_EXTAG_VLAN(tpid));
1063 wr32m(hw, TXGBE_VLANCTL,
1064 TXGBE_VLANCTL_TPID_MASK,
1065 TXGBE_VLANCTL_TPID(tpid));
1066 wr32m(hw, TXGBE_DMATXCTRL,
1067 TXGBE_DMATXCTRL_TPID_MASK,
1068 TXGBE_DMATXCTRL_TPID(tpid));
1072 wr32m(hw, TXGBE_TAGTPID(0),
1073 TXGBE_TAGTPID_MSB_MASK,
1074 TXGBE_TAGTPID_MSB(tpid));
1078 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1086 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1088 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1091 PMD_INIT_FUNC_TRACE();
1093 /* Filter Table Disable */
1094 vlnctrl = rd32(hw, TXGBE_VLANCTL);
1095 vlnctrl &= ~TXGBE_VLANCTL_VFE;
1096 wr32(hw, TXGBE_VLANCTL, vlnctrl);
1100 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1102 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1103 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
1107 PMD_INIT_FUNC_TRACE();
1109 /* Filter Table Enable */
1110 vlnctrl = rd32(hw, TXGBE_VLANCTL);
1111 vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
1112 vlnctrl |= TXGBE_VLANCTL_VFE;
1113 wr32(hw, TXGBE_VLANCTL, vlnctrl);
1115 /* write whatever is in local vfta copy */
1116 for (i = 0; i < TXGBE_VFTA_SIZE; i++)
1117 wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
1121 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1123 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
1124 struct txgbe_rx_queue *rxq;
1126 if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
1130 TXGBE_SET_HWSTRIP(hwstrip, queue);
1132 TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1134 if (queue >= dev->data->nb_rx_queues)
1137 rxq = dev->data->rx_queues[queue];
1140 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1141 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1143 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
1144 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1149 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1151 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1154 PMD_INIT_FUNC_TRACE();
1156 ctrl = rd32(hw, TXGBE_RXCFG(queue));
1157 ctrl &= ~TXGBE_RXCFG_VLAN;
1158 wr32(hw, TXGBE_RXCFG(queue), ctrl);
1160 /* record the VLAN-strip setting for this queue */
1161 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1165 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1167 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1170 PMD_INIT_FUNC_TRACE();
1172 ctrl = rd32(hw, TXGBE_RXCFG(queue));
1173 ctrl |= TXGBE_RXCFG_VLAN;
1174 wr32(hw, TXGBE_RXCFG(queue), ctrl);
1176 /* record the VLAN-strip setting for this queue */
1177 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1181 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1183 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1186 PMD_INIT_FUNC_TRACE();
1188 ctrl = rd32(hw, TXGBE_PORTCTL);
1189 ctrl &= ~TXGBE_PORTCTL_VLANEXT;
1190 wr32(hw, TXGBE_PORTCTL, ctrl);
1194 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1196 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1199 PMD_INIT_FUNC_TRACE();
1201 ctrl = rd32(hw, TXGBE_PORTCTL);
1202 ctrl |= TXGBE_PORTCTL_VLANEXT;
1203 wr32(hw, TXGBE_PORTCTL, ctrl);
1207 txgbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
1209 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1212 PMD_INIT_FUNC_TRACE();
1214 ctrl = rd32(hw, TXGBE_PORTCTL);
1215 ctrl &= ~TXGBE_PORTCTL_QINQ;
1216 wr32(hw, TXGBE_PORTCTL, ctrl);
1220 txgbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
1222 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1225 PMD_INIT_FUNC_TRACE();
1227 ctrl = rd32(hw, TXGBE_PORTCTL);
1228 ctrl |= TXGBE_PORTCTL_QINQ | TXGBE_PORTCTL_VLANEXT;
1229 wr32(hw, TXGBE_PORTCTL, ctrl);
1233 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
1235 struct txgbe_rx_queue *rxq;
1238 PMD_INIT_FUNC_TRACE();
1240 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1241 rxq = dev->data->rx_queues[i];
1243 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1244 txgbe_vlan_strip_queue_set(dev, i, 1);
1246 txgbe_vlan_strip_queue_set(dev, i, 0);
1251 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
1254 struct rte_eth_rxmode *rxmode;
1255 struct txgbe_rx_queue *rxq;
1257 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1258 rxmode = &dev->data->dev_conf.rxmode;
1259 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1260 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1261 rxq = dev->data->rx_queues[i];
1262 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1265 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1266 rxq = dev->data->rx_queues[i];
1267 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1273 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
1275 struct rte_eth_rxmode *rxmode;
1276 rxmode = &dev->data->dev_conf.rxmode;
1278 if (mask & RTE_ETH_VLAN_STRIP_MASK)
1279 txgbe_vlan_hw_strip_config(dev);
1281 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1282 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1283 txgbe_vlan_hw_filter_enable(dev);
1285 txgbe_vlan_hw_filter_disable(dev);
1288 if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
1289 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
1290 txgbe_vlan_hw_extend_enable(dev);
1292 txgbe_vlan_hw_extend_disable(dev);
1295 if (mask & RTE_ETH_QINQ_STRIP_MASK) {
1296 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1297 txgbe_qinq_hw_strip_enable(dev);
1299 txgbe_qinq_hw_strip_disable(dev);
1306 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1308 txgbe_config_vlan_strip_on_all_queues(dev, mask);
1310 txgbe_vlan_offload_config(dev, mask);
1316 txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1318 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1319 /* VLANCTL: enable vlan filtering and allow all vlan tags through */
1320 uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
1322 vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
1323 wr32(hw, TXGBE_VLANCTL, vlanctrl);
1327 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1329 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1334 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
1337 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
1343 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
1344 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1345 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
1346 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
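/*
 * Worked example (assuming TXGBE_MAX_RX_QUEUE_NUM == 128): with 32
 * active pools each pool owns 128 / 32 = 4 queues, and the PF default
 * pool queues start right after the VF range, at max_vfs * 4.
 */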
1351 txgbe_check_mq_mode(struct rte_eth_dev *dev)
1353 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1354 uint16_t nb_rx_q = dev->data->nb_rx_queues;
1355 uint16_t nb_tx_q = dev->data->nb_tx_queues;
1357 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1358 /* check multi-queue mode */
1359 switch (dev_conf->rxmode.mq_mode) {
1360 case RTE_ETH_MQ_RX_VMDQ_DCB:
1361 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
1363 case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
1364 /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1365 PMD_INIT_LOG(ERR, "SRIOV active,"
1366 " unsupported mq_mode rx %d.",
1367 dev_conf->rxmode.mq_mode);
1369 case RTE_ETH_MQ_RX_RSS:
1370 case RTE_ETH_MQ_RX_VMDQ_RSS:
1371 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
1372 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1373 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1374 PMD_INIT_LOG(ERR, "SRIOV is active,"
1375 " invalid queue number"
1376 " for VMDQ RSS, allowed"
1377 " value are 1, 2 or 4.");
1381 case RTE_ETH_MQ_RX_VMDQ_ONLY:
1382 case RTE_ETH_MQ_RX_NONE:
1383 /* if no mq mode is configured, use the default scheme */
1384 dev->data->dev_conf.rxmode.mq_mode =
1385 RTE_ETH_MQ_RX_VMDQ_ONLY;
1387 default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/
1388 /* SRIOV only works in VMDq enable mode */
1389 PMD_INIT_LOG(ERR, "SRIOV is active,"
1390 " wrong mq_mode rx %d.",
1391 dev_conf->rxmode.mq_mode);
1395 switch (dev_conf->txmode.mq_mode) {
1396 case RTE_ETH_MQ_TX_VMDQ_DCB:
1397 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1398 dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
1400 default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
1401 dev->data->dev_conf.txmode.mq_mode =
1402 RTE_ETH_MQ_TX_VMDQ_ONLY;
1406 /* check valid queue number */
1407 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1408 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1409 PMD_INIT_LOG(ERR, "SRIOV is active,"
1410 " nb_rx_q=%d nb_tx_q=%d queue number"
1411 " must be less than or equal to %d.",
1413 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1417 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
1418 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1422 /* check configuration for vmdq+dcb mode */
1423 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
1424 const struct rte_eth_vmdq_dcb_conf *conf;
1426 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1427 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1428 TXGBE_VMDQ_DCB_NB_QUEUES);
1431 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1432 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
1433 conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
1434 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1435 " nb_queue_pools must be %d or %d.",
1436 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
1440 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
1441 const struct rte_eth_vmdq_dcb_tx_conf *conf;
1443 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1444 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1445 TXGBE_VMDQ_DCB_NB_QUEUES);
1448 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1449 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
1450 conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
1451 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1452 " nb_queue_pools != %d and"
1453 " nb_queue_pools != %d.",
1454 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
1459 /* For DCB mode check our configuration before we go further */
1460 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
1461 const struct rte_eth_dcb_rx_conf *conf;
1463 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1464 if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
1465 conf->nb_tcs == RTE_ETH_8_TCS)) {
1466 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1467 " and nb_tcs != %d.",
1468 RTE_ETH_4_TCS, RTE_ETH_8_TCS);
1473 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
1474 const struct rte_eth_dcb_tx_conf *conf;
1476 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1477 if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
1478 conf->nb_tcs == RTE_ETH_8_TCS)) {
1479 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1480 " and nb_tcs != %d.",
1481 RTE_ETH_4_TCS, RTE_ETH_8_TCS);
1490 txgbe_dev_configure(struct rte_eth_dev *dev)
1492 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1493 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1496 PMD_INIT_FUNC_TRACE();
1498 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
1499 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1501 /* multiple queue mode checking */
1502 ret = txgbe_check_mq_mode(dev);
1504 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1509 /* set flag to update link status after init */
1510 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1513 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
1514 * allocation preconditions, it will be reset.
1516 adapter->rx_bulk_alloc_allowed = true;
1522 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1524 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1525 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1528 gpie = rd32(hw, TXGBE_GPIOINTEN);
1529 gpie |= TXGBE_GPIOBIT_6;
1530 wr32(hw, TXGBE_GPIOINTEN, gpie);
1531 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1532 intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
1536 txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1537 uint16_t tx_rate, uint64_t q_msk)
1539 struct txgbe_hw *hw;
1540 struct txgbe_vf_info *vfinfo;
1541 struct rte_eth_link link;
1542 uint8_t nb_q_per_pool;
1543 uint32_t queue_stride;
1544 uint32_t queue_idx, idx = 0, vf_idx;
1546 uint16_t total_rate = 0;
1547 struct rte_pci_device *pci_dev;
1550 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1551 ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1555 if (vf >= pci_dev->max_vfs)
1558 if (tx_rate > link.link_speed)
1564 hw = TXGBE_DEV_HW(dev);
1565 vfinfo = *(TXGBE_DEV_VFDATA(dev));
1566 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1567 queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1568 queue_idx = vf * queue_stride;
1569 queue_end = queue_idx + nb_q_per_pool - 1;
1570 if (queue_end >= hw->mac.max_tx_queues)
1574 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1577 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1579 total_rate += vfinfo[vf_idx].tx_rate[idx];
1585 /* Store tx_rate for this vf. */
1586 for (idx = 0; idx < nb_q_per_pool; idx++) {
1587 if (((uint64_t)0x1 << idx) & q_msk) {
1588 if (vfinfo[vf].tx_rate[idx] != tx_rate)
1589 vfinfo[vf].tx_rate[idx] = tx_rate;
1590 total_rate += tx_rate;
1594 if (total_rate > dev->data->dev_link.link_speed) {
1595 /* Reset the stored TX rate of the VF if it would exceed the
1598 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1602 /* Set ARBTXRATE of each queue/pool for vf X */
1603 for (; queue_idx <= queue_end; queue_idx++) {
1605 txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
1613 * Configure device link speed and set up the link.
1614 * It returns 0 on success.
1617 txgbe_dev_start(struct rte_eth_dev *dev)
1619 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1620 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1621 struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1622 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1623 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1624 uint32_t intr_vector = 0;
1626 bool link_up = false, negotiate = false;
1628 uint32_t allowed_speeds = 0;
1632 uint32_t *link_speeds;
1633 struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1635 PMD_INIT_FUNC_TRACE();
1637 /* Stop the link setup handler before resetting the HW. */
1638 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1640 /* disable uio/vfio intr/eventfd mapping */
1641 rte_intr_disable(intr_handle);
1644 hw->adapter_stopped = 0;
1647 /* reinitialize adapter
1648 * this calls reset and start
1650 hw->nb_rx_queues = dev->data->nb_rx_queues;
1651 hw->nb_tx_queues = dev->data->nb_tx_queues;
1652 status = txgbe_pf_reset_hw(hw);
1655 hw->mac.start_hw(hw);
1656 hw->mac.get_link_status = true;
1657 hw->dev_start = true;
1659 /* configure PF module if SRIOV enabled */
1660 txgbe_pf_host_configure(dev);
1662 txgbe_dev_phy_intr_setup(dev);
1664 /* check and configure queue intr-vector mapping */
1665 if ((rte_intr_cap_multiple(intr_handle) ||
1666 !RTE_ETH_DEV_SRIOV(dev).active) &&
1667 dev->data->dev_conf.intr_conf.rxq != 0) {
1668 intr_vector = dev->data->nb_rx_queues;
1669 if (rte_intr_efd_enable(intr_handle, intr_vector))
1673 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1674 intr_handle->intr_vec =
1675 rte_zmalloc("intr_vec",
1676 dev->data->nb_rx_queues * sizeof(int), 0);
1677 if (intr_handle->intr_vec == NULL) {
1678 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1679 " intr_vec", dev->data->nb_rx_queues);
1684 /* configure MSI-X so the core can sleep until an Rx interrupt */
1685 txgbe_configure_msix(dev);
1687 /* initialize transmission unit */
1688 txgbe_dev_tx_init(dev);
1690 /* This can fail when allocating mbufs for descriptor rings */
1691 err = txgbe_dev_rx_init(dev);
1693 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1697 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
1698 RTE_ETH_VLAN_EXTEND_MASK;
1699 err = txgbe_vlan_offload_config(dev, mask);
1701 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1705 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
1706 /* Enable vlan filtering for VMDq */
1707 txgbe_vmdq_vlan_hw_filter_enable(dev);
1710 /* Configure DCB hw */
1711 txgbe_configure_pb(dev);
1712 txgbe_configure_port(dev);
1713 txgbe_configure_dcb(dev);
1715 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1716 err = txgbe_fdir_configure(dev);
1721 /* Restore vf rate limit */
1722 if (vfinfo != NULL) {
1723 for (vf = 0; vf < pci_dev->max_vfs; vf++)
1724 for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1725 if (vfinfo[vf].tx_rate[idx] != 0)
1726 txgbe_set_vf_rate_limit(dev, vf,
1727 vfinfo[vf].tx_rate[idx],
1731 err = txgbe_dev_rxtx_start(dev);
1733 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1737 /* Skip link setup if loopback mode is enabled. */
1738 if (hw->mac.type == txgbe_mac_raptor &&
1739 dev->data->dev_conf.lpbk_mode)
1740 goto skip_link_setup;
1742 if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1743 err = hw->mac.setup_sfp(hw);
1748 if (hw->phy.media_type == txgbe_media_type_copper) {
1749 /* Turn on the copper */
1750 hw->phy.set_phy_power(hw, true);
1752 /* Turn on the laser */
1753 hw->mac.enable_tx_laser(hw);
1756 if ((hw->subsystem_device_id & 0xFF) != TXGBE_DEV_ID_KR_KX_KX4)
1757 err = hw->mac.check_link(hw, &speed, &link_up, 0);
1760 dev->data->dev_link.link_status = link_up;
1762 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1766 allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
1767 RTE_ETH_LINK_SPEED_10G;
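/*
 * In rte_eth_conf.link_speeds bit 0 is RTE_ETH_LINK_SPEED_FIXED, so the
 * ">> 1" in the check below compares only the speed bits of the
 * requested set against allowed_speeds.
 */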
1769 link_speeds = &dev->data->dev_conf.link_speeds;
1770 if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
1771 PMD_INIT_LOG(ERR, "Invalid link setting");
1776 if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
1777 speed = (TXGBE_LINK_SPEED_100M_FULL |
1778 TXGBE_LINK_SPEED_1GB_FULL |
1779 TXGBE_LINK_SPEED_10GB_FULL);
1781 if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
1782 speed |= TXGBE_LINK_SPEED_10GB_FULL;
1783 if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
1784 speed |= TXGBE_LINK_SPEED_5GB_FULL;
1785 if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
1786 speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1787 if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
1788 speed |= TXGBE_LINK_SPEED_1GB_FULL;
1789 if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
1790 speed |= TXGBE_LINK_SPEED_100M_FULL;
1793 err = hw->mac.setup_link(hw, speed, link_up);
1799 if (rte_intr_allow_others(intr_handle)) {
1800 txgbe_dev_misc_interrupt_setup(dev);
1801 /* check if lsc interrupt is enabled */
1802 if (dev->data->dev_conf.intr_conf.lsc != 0)
1803 txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1805 txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1806 txgbe_dev_macsec_interrupt_setup(dev);
1807 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1809 rte_intr_callback_unregister(intr_handle,
1810 txgbe_dev_interrupt_handler, dev);
1811 if (dev->data->dev_conf.intr_conf.lsc != 0)
1812 PMD_INIT_LOG(INFO, "lsc won't enable because of"
1813 " no intr multiplex");
1816 /* check if rxq interrupt is enabled */
1817 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1818 rte_intr_dp_is_en(intr_handle))
1819 txgbe_dev_rxq_interrupt_setup(dev);
1821 /* enable uio/vfio intr/eventfd mapping */
1822 rte_intr_enable(intr_handle);
1824 /* resume enabled intr since hw reset */
1825 txgbe_enable_intr(dev);
1826 txgbe_l2_tunnel_conf(dev);
1827 txgbe_filter_restore(dev);
1829 if (tm_conf->root && !tm_conf->committed)
1830 PMD_DRV_LOG(WARNING,
1831 "please call hierarchy_commit() "
1832 "before starting the port");
1835 * Update link status right before returning, because it may
1836 * start the link configuration process in a separate thread.
1838 txgbe_dev_link_update(dev, 0);
1840 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1842 txgbe_read_stats_registers(hw, hw_stats);
1843 hw->offset_loaded = 1;
1848 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1849 txgbe_dev_clear_queues(dev);
1854 * Stop device: disable rx and tx functions to allow for reconfiguring.
1857 txgbe_dev_stop(struct rte_eth_dev *dev)
1859 struct rte_eth_link link;
1860 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1861 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1862 struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1863 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1864 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1866 struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1868 if (hw->adapter_stopped)
1871 PMD_INIT_FUNC_TRACE();
1873 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1875 /* disable interrupts */
1876 txgbe_disable_intr(hw);
1879 txgbe_pf_reset_hw(hw);
1880 hw->adapter_stopped = 0;
1885 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1886 vfinfo[vf].clear_to_send = false;
1888 if (hw->phy.media_type == txgbe_media_type_copper) {
1889 /* Turn off the copper */
1890 hw->phy.set_phy_power(hw, false);
1892 /* Turn off the laser */
1893 hw->mac.disable_tx_laser(hw);
1896 txgbe_dev_clear_queues(dev);
1898 /* Clear stored conf */
1899 dev->data->scattered_rx = 0;
1902 /* Clear recorded link status */
1903 memset(&link, 0, sizeof(link));
1904 rte_eth_linkstatus_set(dev, &link);
1906 if (!rte_intr_allow_others(intr_handle))
1907 /* restore the default handler */
1908 rte_intr_callback_register(intr_handle,
1909 txgbe_dev_interrupt_handler,
1912 /* Clean datapath event and queue/vec mapping */
1913 rte_intr_efd_disable(intr_handle);
1914 if (intr_handle->intr_vec != NULL) {
1915 rte_free(intr_handle->intr_vec);
1916 intr_handle->intr_vec = NULL;
1919 /* reset hierarchy commit */
1920 tm_conf->committed = false;
1922 adapter->rss_reta_updated = 0;
1923 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1925 hw->adapter_stopped = true;
1926 dev->data->dev_started = 0;
1927 hw->dev_start = false;
1933 * Set device link up: enable tx.
1936 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1938 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1940 if (hw->phy.media_type == txgbe_media_type_copper) {
1941 /* Turn on the copper */
1942 hw->phy.set_phy_power(hw, true);
1944 /* Turn on the laser */
1945 hw->mac.enable_tx_laser(hw);
1946 txgbe_dev_link_update(dev, 0);
1953 * Set device link down: disable tx.
1956 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1958 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1960 if (hw->phy.media_type == txgbe_media_type_copper) {
1961 /* Turn off the copper */
1962 hw->phy.set_phy_power(hw, false);
1964 /* Turn off the laser */
1965 hw->mac.disable_tx_laser(hw);
1966 txgbe_dev_link_update(dev, 0);
1973 * Reset and stop device.
1976 txgbe_dev_close(struct rte_eth_dev *dev)
1978 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1979 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1980 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1984 PMD_INIT_FUNC_TRACE();
1986 txgbe_pf_reset_hw(hw);
1988 ret = txgbe_dev_stop(dev);
1990 txgbe_dev_free_queues(dev);
1992 /* reprogram the RAR[0] in case user changed it. */
1993 txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1995 /* Unlock any pending hardware semaphore */
1996 txgbe_swfw_lock_reset(hw);
1998 /* disable uio intr before callback unregister */
1999 rte_intr_disable(intr_handle);
2002 ret = rte_intr_callback_unregister(intr_handle,
2003 txgbe_dev_interrupt_handler, dev);
2004 if (ret >= 0 || ret == -ENOENT) {
2006 } else if (ret != -EAGAIN) {
2008 "intr callback unregister failed: %d",
2012 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
2014 /* cancel the delayed handler before removing the dev */
2015 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
2017 /* uninitialize the PF if max_vfs is not zero */
2018 txgbe_pf_host_uninit(dev);
2020 rte_free(dev->data->mac_addrs);
2021 dev->data->mac_addrs = NULL;
2023 rte_free(dev->data->hash_mac_addrs);
2024 dev->data->hash_mac_addrs = NULL;
2026 /* remove all the fdir filters & hash */
2027 txgbe_fdir_filter_uninit(dev);
2029 /* remove all the L2 tunnel filters & hash */
2030 txgbe_l2_tn_filter_uninit(dev);
2032 /* Remove all ntuple filters of the device */
2033 txgbe_ntuple_filter_uninit(dev);
2035 /* clear all the filters list */
2036 txgbe_filterlist_flush();
2038 /* Remove all Traffic Manager configuration */
2039 txgbe_tm_conf_uninit(dev);
2041 #ifdef RTE_LIB_SECURITY
2042 rte_free(dev->security_ctx);
2052 txgbe_dev_reset(struct rte_eth_dev *dev)
2056 /* When a DPDK PMD PF begins to reset the PF port, it should notify
2057 * all its VFs so that they stay aligned with it. The detailed
2058 * notification mechanism is PMD specific; for the txgbe PF it is
2059 * rather complex. To avoid unexpected behavior in the VFs, resetting
2060 * the PF while SR-IOV is active is not supported; this might change later.
2062 if (dev->data->sriov.active)
2065 ret = eth_txgbe_dev_uninit(dev);
2069 ret = eth_txgbe_dev_init(dev, NULL);
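/*
 * The two helpers below turn free-running hardware counters into
 * monotonic deltas: if the current raw value is below the last
 * snapshot, the counter wrapped and one full period (2^32 or 2^36) is
 * added back before subtracting. Worked example for the 32-bit case:
 *   last = 0xFFFFFFF0, current = 0x00000010
 *   -> current + 2^32 - last = 0x20, i.e. 32 packets since last read.
 */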
2074 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
2076 uint64_t current_counter = rd32(hw, reg); \
2077 if (current_counter < last_counter) \
2078 current_counter += 0x100000000LL; \
2079 if (!hw->offset_loaded) \
2080 last_counter = current_counter; \
2081 counter = current_counter - last_counter; \
2082 counter &= 0xFFFFFFFFLL; \
2085 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2087 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
2088 uint64_t current_counter_msb = rd32(hw, reg_msb); \
2089 uint64_t current_counter = (current_counter_msb << 32) | \
2090 current_counter_lsb; \
2091 if (current_counter < last_counter) \
2092 current_counter += 0x1000000000LL; \
2093 if (!hw->offset_loaded) \
2094 last_counter = current_counter; \
2095 counter = current_counter - last_counter; \
2096 counter &= 0xFFFFFFFFFLL; \
2100 txgbe_read_stats_registers(struct txgbe_hw *hw,
2101 struct txgbe_hw_stats *hw_stats)
2106 for (i = 0; i < hw->nb_rx_queues; i++) {
2107 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
2108 hw->qp_last[i].rx_qp_packets,
2109 hw_stats->qp[i].rx_qp_packets);
2110 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
2111 hw->qp_last[i].rx_qp_bytes,
2112 hw_stats->qp[i].rx_qp_bytes);
2113 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
2114 hw->qp_last[i].rx_qp_mc_packets,
2115 hw_stats->qp[i].rx_qp_mc_packets);
2118 for (i = 0; i < hw->nb_tx_queues; i++) {
2119 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
2120 hw->qp_last[i].tx_qp_packets,
2121 hw_stats->qp[i].tx_qp_packets);
2122 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
2123 hw->qp_last[i].tx_qp_bytes,
2124 hw_stats->qp[i].tx_qp_bytes);
2127 for (i = 0; i < TXGBE_MAX_UP; i++) {
2128 hw_stats->up[i].rx_up_xon_packets +=
2129 rd32(hw, TXGBE_PBRXUPXON(i));
2130 hw_stats->up[i].rx_up_xoff_packets +=
2131 rd32(hw, TXGBE_PBRXUPXOFF(i));
2132 hw_stats->up[i].tx_up_xon_packets +=
2133 rd32(hw, TXGBE_PBTXUPXON(i));
2134 hw_stats->up[i].tx_up_xoff_packets +=
2135 rd32(hw, TXGBE_PBTXUPXOFF(i));
2136 hw_stats->up[i].tx_up_xon2off_packets +=
2137 rd32(hw, TXGBE_PBTXUPOFF(i));
2138 hw_stats->up[i].rx_up_dropped +=
2139 rd32(hw, TXGBE_PBRXMISS(i));
2141 hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
2142 hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
2143 hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
2144 hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
2147 hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
2148 hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
2150 hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
2151 hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
2152 hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP);
2153 hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
2156 hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
2157 hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
2158 hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
2160 hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
2161 hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
2162 hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
2164 hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
2165 hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
2167 hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
2168 hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
2169 hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
2170 hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
2171 hw_stats->rx_size_512_to_1023_packets +=
2172 rd64(hw, TXGBE_MACRX512TO1023L);
2173 hw_stats->rx_size_1024_to_max_packets +=
2174 rd64(hw, TXGBE_MACRX1024TOMAXL);
2175 hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
2176 hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
2177 hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
2178 hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
2179 hw_stats->tx_size_512_to_1023_packets +=
2180 rd64(hw, TXGBE_MACTX512TO1023L);
2181 hw_stats->tx_size_1024_to_max_packets +=
2182 rd64(hw, TXGBE_MACTX1024TOMAXL);
2184 hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
2185 hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
2186 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
2189 hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
2190 hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
2191 hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
2192 hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
2195 hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
2196 hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
2197 hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
2198 hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
2199 hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
2200 hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
2201 hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
2203 /* Flow Director Stats */
2204 hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
2205 hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
2206 hw_stats->flow_director_added_filters +=
2207 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
2208 hw_stats->flow_director_removed_filters +=
2209 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
2210 hw_stats->flow_director_filter_add_errors +=
2211 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
2212 hw_stats->flow_director_filter_remove_errors +=
2213 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
2216 hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
2217 hw_stats->tx_macsec_pkts_encrypted +=
2218 rd32(hw, TXGBE_LSECTX_ENCPKT);
2219 hw_stats->tx_macsec_pkts_protected +=
2220 rd32(hw, TXGBE_LSECTX_PROTPKT);
2221 hw_stats->tx_macsec_octets_encrypted +=
2222 rd32(hw, TXGBE_LSECTX_ENCOCT);
2223 hw_stats->tx_macsec_octets_protected +=
2224 rd32(hw, TXGBE_LSECTX_PROTOCT);
2225 hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
2226 hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
2227 hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
2228 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
2229 hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
2230 hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
2231 hw_stats->rx_macsec_sc_pkts_unchecked +=
2232 rd32(hw, TXGBE_LSECRX_UNCHKPKT);
2233 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
2234 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
2235 for (i = 0; i < 2; i++) {
2236 hw_stats->rx_macsec_sa_pkts_ok +=
2237 rd32(hw, TXGBE_LSECRX_OKPKT(i));
2238 hw_stats->rx_macsec_sa_pkts_invalid +=
2239 rd32(hw, TXGBE_LSECRX_INVPKT(i));
2240 hw_stats->rx_macsec_sa_pkts_notvalid +=
2241 rd32(hw, TXGBE_LSECRX_BADPKT(i));
2243 hw_stats->rx_macsec_sa_pkts_unusedsa +=
2244 rd32(hw, TXGBE_LSECRX_INVSAPKT);
2245 hw_stats->rx_macsec_sa_pkts_notusingsa +=
2246 rd32(hw, TXGBE_LSECRX_BADSAPKT);
2248 hw_stats->rx_total_missed_packets = 0;
2249 for (i = 0; i < TXGBE_MAX_UP; i++) {
2250 hw_stats->rx_total_missed_packets +=
2251 hw_stats->up[i].rx_up_dropped;
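/*
 * Illustrative usage sketch (hypothetical application code, not part of
 * this driver): the totals accumulated above surface through the generic
 * ethdev stats API. Assumes port_id names a started txgbe port; error
 * handling is elided.
 *
 *   struct rte_eth_stats stats;
 *
 *   if (rte_eth_stats_get(port_id, &stats) == 0)
 *       printf("rx %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64
 *              " missed\n", stats.ipackets, stats.ibytes, stats.imissed);
 */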
2256 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2258 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2259 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2260 struct txgbe_stat_mappings *stat_mappings =
2261 TXGBE_DEV_STAT_MAPPINGS(dev);
2264 txgbe_read_stats_registers(hw, hw_stats);
2269 /* Fill out the rte_eth_stats statistics structure */
2270 stats->ipackets = hw_stats->rx_packets;
2271 stats->ibytes = hw_stats->rx_bytes;
2272 stats->opackets = hw_stats->tx_packets;
2273 stats->obytes = hw_stats->tx_bytes;
2275 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2276 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2277 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2278 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2279 memset(&stats->q_errors, 0, sizeof(stats->q_errors));
2280 for (i = 0; i < TXGBE_MAX_QP; i++) {
2281 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2282 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2285 q_map = (stat_mappings->rqsm[n] >> offset)
2286 & QMAP_FIELD_RESERVED_BITS_MASK;
2287 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2288 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2289 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2290 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2292 q_map = (stat_mappings->tqsm[n] >> offset)
2293 & QMAP_FIELD_RESERVED_BITS_MASK;
2294 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2295 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2296 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2297 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2301 stats->imissed = hw_stats->rx_total_missed_packets +
2302 hw_stats->rx_dma_drop;
2303 stats->ierrors = hw_stats->rx_crc_errors +
2304 hw_stats->rx_mac_short_packet_dropped +
2305 hw_stats->rx_length_errors +
2306 hw_stats->rx_undersize_errors +
2307 hw_stats->rx_oversize_errors +
2308 hw_stats->rx_drop_packets +
2309 hw_stats->rx_illegal_byte_errors +
2310 hw_stats->rx_error_bytes +
2311 hw_stats->rx_fragment_errors +
2312 hw_stats->rx_fcoe_crc_errors +
2313 hw_stats->rx_fcoe_mbuf_allocation_errors;
2321 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2323 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2324 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2326 /* HW registers are cleared on read */
2327 hw->offset_loaded = 0;
2328 txgbe_dev_stats_get(dev, NULL);
2329 hw->offset_loaded = 1;
2331 /* Reset software totals */
2332 memset(hw_stats, 0, sizeof(*hw_stats));
2337 /* This function calculates the number of xstats based on the current config */
2339 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2341 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2342 return TXGBE_NB_HW_STATS +
2343 TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2344 TXGBE_NB_QP_STATS * nb_queues;
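/*
 * Annotation (added): the xstats id space handed to the helpers below is
 * made of three consecutive blocks, decoded in this order:
 *   ids [0, TXGBE_NB_HW_STATS)                   device-wide "[hw]" stats
 *   next TXGBE_NB_UP_STATS * TXGBE_MAX_UP ids    per-priority "[pN]" stats
 *   next TXGBE_NB_QP_STATS * nb_queues ids       per-queue "[qN]" stats
 */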
2348 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2352 /* Extended stats from txgbe_hw_stats */
2353 if (id < TXGBE_NB_HW_STATS) {
2354 snprintf(name, size, "[hw]%s",
2355 rte_txgbe_stats_strings[id].name);
2358 id -= TXGBE_NB_HW_STATS;
2360 /* Priority Stats */
2361 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2362 nb = id / TXGBE_NB_UP_STATS;
2363 st = id % TXGBE_NB_UP_STATS;
2364 snprintf(name, size, "[p%u]%s", nb,
2365 rte_txgbe_up_strings[st].name);
2368 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2371 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2372 nb = id / TXGBE_NB_QP_STATS;
2373 st = id % TXGBE_NB_QP_STATS;
2374 snprintf(name, size, "[q%u]%s", nb,
2375 rte_txgbe_qp_strings[st].name);
2378 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2380 return -(int)(id + 1);
2384 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2388 /* Extended stats from txgbe_hw_stats */
2389 if (id < TXGBE_NB_HW_STATS) {
2390 *offset = rte_txgbe_stats_strings[id].offset;
2393 id -= TXGBE_NB_HW_STATS;
2395 /* Priority Stats */
2396 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2397 nb = id / TXGBE_NB_UP_STATS;
2398 st = id % TXGBE_NB_UP_STATS;
2399 *offset = rte_txgbe_up_strings[st].offset +
2400 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2403 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2406 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2407 nb = id / TXGBE_NB_QP_STATS;
2408 st = id % TXGBE_NB_QP_STATS;
2409 *offset = rte_txgbe_qp_strings[st].offset +
2410 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2417 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2418 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2420 unsigned int i, count;
2422 count = txgbe_xstats_calc_num(dev);
2423 if (xstats_names == NULL)
2426 /* Note: limit >= cnt_stats checked upstream
2427 * in rte_eth_xstats_get_names()
2429 limit = min(limit, count);
2431 /* Extended stats from txgbe_hw_stats */
2432 for (i = 0; i < limit; i++) {
2433 if (txgbe_get_name_by_id(i, xstats_names[i].name,
2434 sizeof(xstats_names[i].name))) {
2435 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2443 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2444 const uint64_t *ids,
2445 struct rte_eth_xstat_name *xstats_names,
2451 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2453 for (i = 0; i < limit; i++) {
2454 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2455 sizeof(xstats_names[i].name))) {
2456 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2465 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2468 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2469 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2470 unsigned int i, count;
2472 txgbe_read_stats_registers(hw, hw_stats);
2474 /* If this is a reset, xstats is NULL and we have already cleared
2475 * the registers by reading them.
2477 count = txgbe_xstats_calc_num(dev);
2481 limit = min(limit, txgbe_xstats_calc_num(dev));
2483 /* Extended stats from txgbe_hw_stats */
2484 for (i = 0; i < limit; i++) {
2485 uint32_t offset = 0;
2487 if (txgbe_get_offset_by_id(i, &offset)) {
2488 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2491 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2499 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2502 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2503 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2504 unsigned int i, count;
2506 txgbe_read_stats_registers(hw, hw_stats);
2508 /* If this is a reset, xstats is NULL and we have already cleared
2509 * the registers by reading them.
2511 count = txgbe_xstats_calc_num(dev);
2515 limit = min(limit, txgbe_xstats_calc_num(dev));
2517 /* Extended stats from txgbe_hw_stats */
2518 for (i = 0; i < limit; i++) {
2521 if (txgbe_get_offset_by_id(i, &offset)) {
2522 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2525 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2532 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2533 uint64_t *values, unsigned int limit)
2535 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2539 return txgbe_dev_xstats_get_(dev, values, limit);
2541 for (i = 0; i < limit; i++) {
2544 if (txgbe_get_offset_by_id(ids[i], &offset)) {
2545 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2548 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2555 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2557 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2558 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2560 /* HW registers are cleared on read */
2561 hw->offset_loaded = 0;
2562 txgbe_read_stats_registers(hw, hw_stats);
2563 hw->offset_loaded = 1;
2565 /* Reset software totals */
2566 memset(hw_stats, 0, sizeof(*hw_stats));
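/*
 * Illustrative usage sketch (hypothetical application code): dumping the
 * extended stats decoded by the name/offset helpers above. Assumes
 * port_id is valid; allocation failures and error paths are elided.
 *
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);
 *   struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *   struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *   rte_eth_xstats_get_names(port_id, names, n);
 *   rte_eth_xstats_get(port_id, xs, n);
 *   for (int i = 0; i < n; i++)
 *       printf("%s: %" PRIu64 "\n", names[xs[i].id].name, xs[i].value);
 */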
2572 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2574 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2578 hw->phy.get_fw_version(hw, &etrack_id);
2580 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2584 ret += 1; /* add the size of '\0' */
2585 if (fw_size < (size_t)ret)
2592 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2594 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2595 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2597 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2598 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2599 dev_info->min_rx_bufsize = 1024;
2600 dev_info->max_rx_pktlen = 15872;
2601 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2602 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2603 dev_info->max_vfs = pci_dev->max_vfs;
2604 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
2605 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2606 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2607 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2608 dev_info->rx_queue_offload_capa);
2609 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2610 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2612 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2614 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2615 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2616 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2618 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2623 dev_info->default_txconf = (struct rte_eth_txconf) {
2625 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2626 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2627 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2629 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2633 dev_info->rx_desc_lim = rx_desc_lim;
2634 dev_info->tx_desc_lim = tx_desc_lim;
2636 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2637 dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
2638 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2640 dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
2641 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
2643 /* Driver-preferred Rx/Tx parameters */
2644 dev_info->default_rxportconf.burst_size = 32;
2645 dev_info->default_txportconf.burst_size = 32;
2646 dev_info->default_rxportconf.nb_queues = 1;
2647 dev_info->default_txportconf.nb_queues = 1;
2648 dev_info->default_rxportconf.ring_size = 256;
2649 dev_info->default_txportconf.ring_size = 256;
2655 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2657 if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2658 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2659 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2660 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2661 return txgbe_get_supported_ptypes();
2667 txgbe_dev_setup_link_alarm_handler(void *param)
2669 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2670 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2671 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2673 bool autoneg = false;
2675 speed = hw->phy.autoneg_advertised;
2677 hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2679 hw->mac.setup_link(hw, speed, true);
2681 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2684 /* return 0 means link status changed, -1 means not changed */
2686 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2687 int wait_to_complete)
2689 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2690 struct rte_eth_link link;
2691 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2692 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2697 memset(&link, 0, sizeof(link));
2698 link.link_status = RTE_ETH_LINK_DOWN;
2699 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2700 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
2701 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2702 RTE_ETH_LINK_AUTONEG);
2704 hw->mac.get_link_status = true;
2706 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2707 return rte_eth_linkstatus_set(dev, &link);
2709 /* check whether the wait to complete is needed; skip it if the lsc interrupt is enabled */
2710 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2713 err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2716 link.link_speed = RTE_ETH_SPEED_NUM_100M;
2717 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2718 return rte_eth_linkstatus_set(dev, &link);
2722 if ((hw->subsystem_device_id & 0xFF) ==
2723 TXGBE_DEV_ID_KR_KX_KX4) {
2724 hw->mac.bp_down_event(hw);
2725 } else if (hw->phy.media_type == txgbe_media_type_fiber) {
2726 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2727 rte_eal_alarm_set(10,
2728 txgbe_dev_setup_link_alarm_handler, dev);
2730 return rte_eth_linkstatus_set(dev, &link);
2731 } else if (!hw->dev_start) {
2732 return rte_eth_linkstatus_set(dev, &link);
2735 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2736 link.link_status = RTE_ETH_LINK_UP;
2737 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2739 switch (link_speed) {
2741 case TXGBE_LINK_SPEED_UNKNOWN:
2742 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2743 link.link_speed = RTE_ETH_SPEED_NUM_100M;
2746 case TXGBE_LINK_SPEED_100M_FULL:
2747 link.link_speed = RTE_ETH_SPEED_NUM_100M;
2750 case TXGBE_LINK_SPEED_1GB_FULL:
2751 link.link_speed = RTE_ETH_SPEED_NUM_1G;
2754 case TXGBE_LINK_SPEED_2_5GB_FULL:
2755 link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
2758 case TXGBE_LINK_SPEED_5GB_FULL:
2759 link.link_speed = RTE_ETH_SPEED_NUM_5G;
2762 case TXGBE_LINK_SPEED_10GB_FULL:
2763 link.link_speed = RTE_ETH_SPEED_NUM_10G;
2767 return rte_eth_linkstatus_set(dev, &link);
2771 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2773 return txgbe_dev_link_update_share(dev, wait_to_complete);
2777 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2779 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2782 fctrl = rd32(hw, TXGBE_PSRCTL);
2783 fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2784 wr32(hw, TXGBE_PSRCTL, fctrl);
2790 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2792 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2795 fctrl = rd32(hw, TXGBE_PSRCTL);
2796 fctrl &= (~TXGBE_PSRCTL_UCP);
2797 if (dev->data->all_multicast == 1)
2798 fctrl |= TXGBE_PSRCTL_MCP;
2800 fctrl &= (~TXGBE_PSRCTL_MCP);
2801 wr32(hw, TXGBE_PSRCTL, fctrl);
2807 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2809 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2812 fctrl = rd32(hw, TXGBE_PSRCTL);
2813 fctrl |= TXGBE_PSRCTL_MCP;
2814 wr32(hw, TXGBE_PSRCTL, fctrl);
2820 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2822 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2825 if (dev->data->promiscuous == 1)
2826 return 0; /* must remain in all_multicast mode */
2828 fctrl = rd32(hw, TXGBE_PSRCTL);
2829 fctrl &= (~TXGBE_PSRCTL_MCP);
2830 wr32(hw, TXGBE_PSRCTL, fctrl);
2836 * It clears the interrupt causes and enables the interrupt.
2837 * It will be called only once, during NIC initialization.
2840 * Pointer to struct rte_eth_dev.
2842 * Enable or Disable.
2845 * - On success, zero.
2846 * - On failure, a negative value.
2849 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2851 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2853 txgbe_dev_link_status_print(dev);
2855 intr->mask_misc |= TXGBE_ICRMISC_LSC;
2857 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2863 txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2865 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2868 mask = TXGBE_ICR_MASK;
2869 mask &= (1ULL << TXGBE_MISC_VEC_ID);
2871 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
2872 intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
2877 * It clears the interrupt causes and enables the interrupt.
2878 * It will be called only once, during NIC initialization.
2881 * Pointer to struct rte_eth_dev.
2884 * - On success, zero.
2885 * - On failure, a negative value.
2888 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2890 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2893 mask = TXGBE_ICR_MASK;
2894 mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1);
2901 * It clears the interrupt causes and enables the interrupt.
2902 * It will be called only once, during NIC initialization.
2905 * Pointer to struct rte_eth_dev.
2908 * - On success, zero.
2909 * - On failure, a negative value.
2912 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2914 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2916 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2922 * It reads the ICR and sets the flag (TXGBE_ICRMISC_LSC) for link_update.
2925 * Pointer to struct rte_eth_dev.
2928 * - On success, zero.
2929 * - On failure, a negative value.
2932 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
2933 struct rte_intr_handle *intr_handle)
2936 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2937 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2939 if (intr_handle->type != RTE_INTR_HANDLE_UIO &&
2940 intr_handle->type != RTE_INTR_HANDLE_VFIO_MSIX)
2941 wr32(hw, TXGBE_PX_INTA, 1);
2943 /* clear all cause mask */
2944 txgbe_disable_intr(hw);
2946 /* read-on-clear nic registers here */
2947 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2948 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2952 /* set flag for async link update */
2953 if (eicr & TXGBE_ICRMISC_LSC)
2954 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2956 if (eicr & TXGBE_ICRMISC_ANDONE)
2957 intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG;
2959 if (eicr & TXGBE_ICRMISC_VFMBX)
2960 intr->flags |= TXGBE_FLAG_MAILBOX;
2962 if (eicr & TXGBE_ICRMISC_LNKSEC)
2963 intr->flags |= TXGBE_FLAG_MACSEC;
2965 if (eicr & TXGBE_ICRMISC_GPIO)
2966 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2972 * It gets and then prints the link status.
2975 * Pointer to struct rte_eth_dev.
2978 * - On success, zero.
2979 * - On failure, a negative value.
2982 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2984 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2985 struct rte_eth_link link;
2987 rte_eth_linkstatus_get(dev, &link);
2989 if (link.link_status) {
2990 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2991 (int)(dev->data->port_id),
2992 (unsigned int)link.link_speed,
2993 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2994 "full-duplex" : "half-duplex");
2996 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2997 (int)(dev->data->port_id));
2999 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
3000 pci_dev->addr.domain,
3002 pci_dev->addr.devid,
3003 pci_dev->addr.function);
3007 * It executes link_update after an interrupt has occurred.
3010 * Pointer to struct rte_eth_dev.
3013 * - On success, zero.
3014 * - On failure, a negative value.
3017 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
3018 struct rte_intr_handle *intr_handle)
3020 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
3022 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3024 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
3026 if (intr->flags & TXGBE_FLAG_MAILBOX) {
3027 txgbe_pf_mbx_process(dev);
3028 intr->flags &= ~TXGBE_FLAG_MAILBOX;
3031 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3032 hw->phy.handle_lasi(hw);
3033 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3036 if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) {
3037 if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) {
3038 hw->mac.kr_handle(hw);
3039 intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG;
3043 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3044 struct rte_eth_link link;
3046 /* get the link status before link update, for predicting later */
3047 rte_eth_linkstatus_get(dev, &link);
3049 txgbe_dev_link_update(dev, 0);
3052 if (!link.link_status)
3053 /* handle it 1 sec later, wait for it to become stable */
3054 timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
3055 /* the link is likely to go down */
3056 else if ((hw->subsystem_device_id & 0xFF) ==
3057 TXGBE_DEV_ID_KR_KX_KX4 &&
3058 hw->devarg.auto_neg == 1)
3059 /* handle it 2 sec later for backplane AN73 */
3062 /* handle it 4 sec later, wait for it to become stable */
3063 timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
3065 txgbe_dev_link_status_print(dev);
3066 if (rte_eal_alarm_set(timeout * 1000,
3067 txgbe_dev_interrupt_delayed_handler,
3069 PMD_DRV_LOG(ERR, "Error setting alarm");
3071 /* only disable lsc interrupt */
3072 intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
3074 intr->mask_orig = intr->mask;
3075 /* only disable all misc interrupts */
3076 intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID);
3080 PMD_DRV_LOG(DEBUG, "enable intr immediately");
3081 txgbe_enable_intr(dev);
3082 rte_intr_enable(intr_handle);
3088 * Interrupt handler registered as an alarm callback for delayed handling
3089 * of a specific interrupt, waiting for the NIC state to become stable. As
3090 * the txgbe interrupt state is not stable right after the link goes down,
3091 * it needs to wait 4 seconds before a stable status can be read.
3094 * Pointer to interrupt handle.
3096 * The address of parameter (struct rte_eth_dev *) registered before.
3102 txgbe_dev_interrupt_delayed_handler(void *param)
3104 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3105 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3106 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3107 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
3108 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3111 txgbe_disable_intr(hw);
3113 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
3114 if (eicr & TXGBE_ICRMISC_VFMBX)
3115 txgbe_pf_mbx_process(dev);
3117 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3118 hw->phy.handle_lasi(hw);
3119 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3122 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3123 txgbe_dev_link_update(dev, 0);
3124 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
3125 txgbe_dev_link_status_print(dev);
3126 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
3130 if (intr->flags & TXGBE_FLAG_MACSEC) {
3131 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
3133 intr->flags &= ~TXGBE_FLAG_MACSEC;
3136 /* restore original mask */
3137 intr->mask_misc |= TXGBE_ICRMISC_LSC;
3139 intr->mask = intr->mask_orig;
3140 intr->mask_orig = 0;
3142 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3143 txgbe_enable_intr(dev);
3144 rte_intr_enable(intr_handle);
3148 * Interrupt handler triggered by the NIC for handling a
3149 * specific interrupt.
3152 * Pointer to interrupt handle.
3154 * The address of parameter (struct rte_eth_dev *) registered before.
3160 txgbe_dev_interrupt_handler(void *param)
3162 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3164 txgbe_dev_interrupt_get_status(dev, dev->intr_handle);
3165 txgbe_dev_interrupt_action(dev, dev->intr_handle);
3169 txgbe_dev_led_on(struct rte_eth_dev *dev)
3171 struct txgbe_hw *hw;
3173 hw = TXGBE_DEV_HW(dev);
3174 return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
3178 txgbe_dev_led_off(struct rte_eth_dev *dev)
3180 struct txgbe_hw *hw;
3182 hw = TXGBE_DEV_HW(dev);
3183 return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
3187 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3189 struct txgbe_hw *hw;
3195 hw = TXGBE_DEV_HW(dev);
3197 fc_conf->pause_time = hw->fc.pause_time;
3198 fc_conf->high_water = hw->fc.high_water[0];
3199 fc_conf->low_water = hw->fc.low_water[0];
3200 fc_conf->send_xon = hw->fc.send_xon;
3201 fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3204 * Return rx_pause status according to the actual setting of the RXFCCFG register.
3207 mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
3208 if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
3214 * Return tx_pause status according to the actual setting of the TXFCCFG register.
3217 fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
3218 if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
3223 if (rx_pause && tx_pause)
3224 fc_conf->mode = RTE_ETH_FC_FULL;
3226 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
3228 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
3230 fc_conf->mode = RTE_ETH_FC_NONE;
3236 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3238 struct txgbe_hw *hw;
3240 uint32_t rx_buf_size;
3241 uint32_t max_high_water;
3242 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3249 PMD_INIT_FUNC_TRACE();
3251 hw = TXGBE_DEV_HW(dev);
3252 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
3253 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3256 * Reserve at least one Ethernet frame for the watermark;
3257 * high_water/low_water are in kilobytes for txgbe.
3259 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3260 if (fc_conf->high_water > max_high_water ||
3261 fc_conf->high_water < fc_conf->low_water) {
3262 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3263 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3267 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
3268 hw->fc.pause_time = fc_conf->pause_time;
3269 hw->fc.high_water[0] = fc_conf->high_water;
3270 hw->fc.low_water[0] = fc_conf->low_water;
3271 hw->fc.send_xon = fc_conf->send_xon;
3272 hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3274 err = txgbe_fc_enable(hw);
3276 /* Not negotiated is not an error case */
3277 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
3278 wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
3279 (fc_conf->mac_ctrl_frame_fwd
3280 ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
3286 PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
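/*
 * Illustrative usage sketch (hypothetical application code): requesting
 * full link-level flow control through the generic API that lands in the
 * handler above. Watermarks are in KB and must satisfy
 * low_water <= high_water <= the per-packet-buffer maximum computed
 * above; the values below are examples only.
 *
 *   struct rte_eth_fc_conf fc = {
 *       .mode = RTE_ETH_FC_FULL,
 *       .high_water = 0x80,
 *       .low_water = 0x40,
 *       .pause_time = 0x680,
 *       .send_xon = 1,
 *       .autoneg = 1,
 *   };
 *
 *   rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */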
3291 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3292 struct rte_eth_pfc_conf *pfc_conf)
3295 uint32_t rx_buf_size;
3296 uint32_t max_high_water;
3298 uint8_t map[TXGBE_DCB_UP_MAX] = { 0 };
3299 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3300 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3302 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3309 PMD_INIT_FUNC_TRACE();
3311 txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3312 tc_num = map[pfc_conf->priority];
3313 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3314 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3316 * Reserve at least one Ethernet frame for the watermark;
3317 * high_water/low_water are in kilobytes for txgbe.
3319 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3320 if (pfc_conf->fc.high_water > max_high_water ||
3321 pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3322 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3323 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3327 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3328 hw->fc.pause_time = pfc_conf->fc.pause_time;
3329 hw->fc.send_xon = pfc_conf->fc.send_xon;
3330 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
3331 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3333 err = txgbe_dcb_pfc_enable(hw, tc_num);
3335 /* Not negotiated is not an error case */
3336 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3339 PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3344 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3345 struct rte_eth_rss_reta_entry64 *reta_conf,
3350 uint16_t idx, shift;
3351 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3352 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3354 PMD_INIT_FUNC_TRACE();
3356 if (!txgbe_rss_update_sp(hw->mac.type)) {
3357 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3362 if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
3363 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3364 "(%d) doesn't match the number hardware can supported "
3365 "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
3369 for (i = 0; i < reta_size; i += 4) {
3370 idx = i / RTE_ETH_RETA_GROUP_SIZE;
3371 shift = i % RTE_ETH_RETA_GROUP_SIZE;
3372 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3376 reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3377 for (j = 0; j < 4; j++) {
3378 if (RS8(mask, j, 0x1)) {
3379 reta &= ~(MS32(8 * j, 0xFF));
3380 reta |= LS32(reta_conf[idx].reta[shift + j],
3384 wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3386 adapter->rss_reta_updated = 1;
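/*
 * Illustrative usage sketch (hypothetical application code): filling the
 * 128-entry redirection table round-robin over nb_q queues, using the
 * 64-entry rte_eth_rss_reta_entry64 groups that the handler above
 * consumes.
 *
 *   struct rte_eth_rss_reta_entry64 reta[2];
 *   uint16_t i;
 *
 *   memset(reta, 0, sizeof(reta));
 *   for (i = 0; i < 128; i++) {
 *       reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *           1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *       reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *           i % nb_q;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta, 128);
 */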
3392 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3393 struct rte_eth_rss_reta_entry64 *reta_conf,
3396 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3399 uint16_t idx, shift;
3401 PMD_INIT_FUNC_TRACE();
3403 if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
3404 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3405 "(%d) doesn't match the number hardware can supported "
3406 "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
3410 for (i = 0; i < reta_size; i += 4) {
3411 idx = i / RTE_ETH_RETA_GROUP_SIZE;
3412 shift = i % RTE_ETH_RETA_GROUP_SIZE;
3413 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3417 reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3418 for (j = 0; j < 4; j++) {
3419 if (RS8(mask, j, 0x1))
3420 reta_conf[idx].reta[shift + j] =
3421 (uint16_t)RS32(reta, 8 * j, 0xFF);
3429 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3430 uint32_t index, uint32_t pool)
3432 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3433 uint32_t enable_addr = 1;
3435 return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3440 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3442 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3444 txgbe_clear_rar(hw, index);
3448 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3450 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3452 txgbe_remove_rar(dev, 0);
3453 txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3459 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3461 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3462 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3463 struct rte_eth_dev_data *dev_data = dev->data;
3465 /* If the device is started, refuse an MTU that requires scattered-packet
3466 * support when that feature has not been enabled before.
3468 if (dev_data->dev_started && !dev_data->scattered_rx &&
3469 (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3470 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3471 PMD_INIT_LOG(ERR, "Stop port first.");
3476 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3477 TXGBE_FRAME_SIZE_MAX);
3479 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3480 TXGBE_FRMSZ_MAX(frame_size));
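/*
 * Illustrative usage sketch (hypothetical application code): per the
 * check above, a jumbo MTU has to be applied before the port is started
 * unless scattered Rx was already enabled at configure time.
 *
 *   rte_eth_dev_set_mtu(port_id, 9000);
 */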
3486 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3488 uint32_t vector = 0;
3490 switch (hw->mac.mc_filter_type) {
3491 case 0: /* use bits [47:36] of the address */
3492 vector = ((uc_addr->addr_bytes[4] >> 4) |
3493 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3495 case 1: /* use bits [46:35] of the address */
3496 vector = ((uc_addr->addr_bytes[4] >> 3) |
3497 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3499 case 2: /* use bits [45:34] of the address */
3500 vector = ((uc_addr->addr_bytes[4] >> 2) |
3501 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3503 case 3: /* use bits [43:32] of the address */
3504 vector = ((uc_addr->addr_bytes[4]) |
3505 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3507 default: /* Invalid mc_filter_type */
3511 /* the vector can only be 12 bits wide, or the table boundary will be exceeded */
3517 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3518 struct rte_ether_addr *mac_addr, uint8_t on)
3526 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3527 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3529 /* The UTA table only exists on pf hardware */
3530 if (hw->mac.type < txgbe_mac_raptor)
3533 vector = txgbe_uta_vector(hw, mac_addr);
3534 uta_idx = (vector >> 5) & 0x7F;
3535 uta_mask = 0x1UL << (vector & 0x1F);
3537 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3540 reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3542 uta_info->uta_in_use++;
3543 reg_val |= uta_mask;
3544 uta_info->uta_shadow[uta_idx] |= uta_mask;
3546 uta_info->uta_in_use--;
3547 reg_val &= ~uta_mask;
3548 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3551 wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3553 psrctl = rd32(hw, TXGBE_PSRCTL);
3554 if (uta_info->uta_in_use > 0)
3555 psrctl |= TXGBE_PSRCTL_UCHFENA;
3557 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3559 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3560 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3561 wr32(hw, TXGBE_PSRCTL, psrctl);
3567 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3569 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3570 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3574 /* The UTA table only exists on pf hardware */
3575 if (hw->mac.type < txgbe_mac_raptor)
3579 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3580 uta_info->uta_shadow[i] = ~0;
3581 wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3584 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3585 uta_info->uta_shadow[i] = 0;
3586 wr32(hw, TXGBE_UCADDRTBL(i), 0);
3590 psrctl = rd32(hw, TXGBE_PSRCTL);
3592 psrctl |= TXGBE_PSRCTL_UCHFENA;
3594 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3596 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3597 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3598 wr32(hw, TXGBE_PSRCTL, psrctl);
3604 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3606 uint32_t new_val = orig_val;
3608 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
3609 new_val |= TXGBE_POOLETHCTL_UTA;
3610 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
3611 new_val |= TXGBE_POOLETHCTL_MCHA;
3612 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
3613 new_val |= TXGBE_POOLETHCTL_UCHA;
3614 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
3615 new_val |= TXGBE_POOLETHCTL_BCA;
3616 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
3617 new_val |= TXGBE_POOLETHCTL_MCP;
3623 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3625 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3626 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3628 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3630 if (queue_id < 32) {
3631 mask = rd32(hw, TXGBE_IMS(0));
3632 mask &= (1 << queue_id);
3633 wr32(hw, TXGBE_IMS(0), mask);
3634 } else if (queue_id < 64) {
3635 mask = rd32(hw, TXGBE_IMS(1));
3636 mask &= (1 << (queue_id - 32));
3637 wr32(hw, TXGBE_IMS(1), mask);
3639 rte_intr_enable(intr_handle);
3645 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3648 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3650 if (queue_id < 32) {
3651 mask = rd32(hw, TXGBE_IMS(0));
3652 mask &= ~(1 << queue_id);
3653 wr32(hw, TXGBE_IMS(0), mask);
3654 } else if (queue_id < 64) {
3655 mask = rd32(hw, TXGBE_IMS(1));
3656 mask &= ~(1 << (queue_id - 32));
3657 wr32(hw, TXGBE_IMS(1), mask);
3664 * set the IVAR registers, mapping interrupt causes to vectors
3666 * pointer to txgbe_hw struct
3668 * 0 for Rx, 1 for Tx, -1 for other causes
3670 * queue to map the corresponding interrupt to
3672 * the vector to map to the corresponding queue
3675 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3676 uint8_t queue, uint8_t msix_vector)
3680 if (direction == -1) {
3682 msix_vector |= TXGBE_IVARMISC_VLD;
3684 tmp = rd32(hw, TXGBE_IVARMISC);
3685 tmp &= ~(0xFF << idx);
3686 tmp |= (msix_vector << idx);
3687 wr32(hw, TXGBE_IVARMISC, tmp);
3689 /* rx or tx causes */
3690 /* Workaround for lost ICR */
3691 idx = ((16 * (queue & 1)) + (8 * direction));
3692 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3693 tmp &= ~(0xFF << idx);
3694 tmp |= (msix_vector << idx);
3695 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
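/*
 * Worked example (annotation, not in the upstream file): mapping Rx
 * queue 3 to MSI-X vector 5 gives direction == 0 and queue == 3, so
 * idx = 16 * (3 & 1) + 8 * 0 = 16 and the vector byte is written to
 * bits 23:16 of TXGBE_IVAR(3 >> 1), i.e. IVAR register 1. Each 32-bit
 * IVAR entry thus packs the Rx and Tx vectors of two adjacent queues,
 * one byte per interrupt cause.
 */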
3700 * Sets up the hardware to properly generate MSI-X interrupts
3702 * board private structure
3705 txgbe_configure_msix(struct rte_eth_dev *dev)
3707 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3708 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3709 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3710 uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3711 uint32_t vec = TXGBE_MISC_VEC_ID;
3714 /* Won't configure the MSI-X register if no mapping is done
3715 * between intr vector and event fd,
3716 * but if MSI-X has been enabled already, we need to configure
3717 * auto clean, auto mask and throttling.
3719 gpie = rd32(hw, TXGBE_GPIE);
3720 if (!rte_intr_dp_is_en(intr_handle) &&
3721 !(gpie & TXGBE_GPIE_MSIX))
3724 if (rte_intr_allow_others(intr_handle)) {
3725 base = TXGBE_RX_VEC_START;
3729 /* setup GPIE for MSI-X mode */
3730 gpie = rd32(hw, TXGBE_GPIE);
3731 gpie |= TXGBE_GPIE_MSIX;
3732 wr32(hw, TXGBE_GPIE, gpie);
3734 /* Populate the IVAR table and set the ITR values to the
3735 * corresponding register.
3737 if (rte_intr_dp_is_en(intr_handle)) {
3738 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3740 /* by default, 1:1 mapping */
3741 txgbe_set_ivar_map(hw, 0, queue_id, vec);
3742 intr_handle->intr_vec[queue_id] = vec;
3743 if (vec < base + intr_handle->nb_efd - 1)
3747 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3749 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3750 TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3755 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3756 uint16_t queue_idx, uint16_t tx_rate)
3758 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3761 if (queue_idx >= hw->mac.max_tx_queues)
3765 bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3766 bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3772 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3773 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3775 wr32(hw, TXGBE_ARBTXMMW, 0x14);
3777 /* Set ARBTXRATE of queue X */
3778 wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3779 wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
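/*
 * Illustrative usage sketch (hypothetical application code): capping Tx
 * queue 0 of a port at 1000 Mbps via the generic ethdev hook that ends
 * up in the function above (tx_rate is in Mbps).
 *
 *   rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */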
3786 txgbe_syn_filter_set(struct rte_eth_dev *dev,
3787 struct rte_eth_syn_filter *filter,
3790 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3791 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3795 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3798 syn_info = filter_info->syn_info;
3801 if (syn_info & TXGBE_SYNCLS_ENA)
3803 synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3804 synqf |= TXGBE_SYNCLS_ENA;
3806 if (filter->hig_pri)
3807 synqf |= TXGBE_SYNCLS_HIPRIO;
3809 synqf &= ~TXGBE_SYNCLS_HIPRIO;
3811 synqf = rd32(hw, TXGBE_SYNCLS);
3812 if (!(syn_info & TXGBE_SYNCLS_ENA))
3814 synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3817 filter_info->syn_info = synqf;
3818 wr32(hw, TXGBE_SYNCLS, synqf);
3823 static inline enum txgbe_5tuple_protocol
3824 convert_protocol_type(uint8_t protocol_value)
3826 if (protocol_value == IPPROTO_TCP)
3827 return TXGBE_5TF_PROT_TCP;
3828 else if (protocol_value == IPPROTO_UDP)
3829 return TXGBE_5TF_PROT_UDP;
3830 else if (protocol_value == IPPROTO_SCTP)
3831 return TXGBE_5TF_PROT_SCTP;
3833 return TXGBE_5TF_PROT_NONE;
3836 /* inject a 5-tuple filter to HW */
3838 txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
3839 struct txgbe_5tuple_filter *filter)
3841 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3843 uint32_t ftqf, sdpqf;
3844 uint32_t l34timir = 0;
3845 uint32_t mask = TXGBE_5TFCTL0_MASK;
3848 sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
3849 sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
3851 ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
3852 ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
3853 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
3854 mask &= ~TXGBE_5TFCTL0_MSADDR;
3855 if (filter->filter_info.dst_ip_mask == 0)
3856 mask &= ~TXGBE_5TFCTL0_MDADDR;
3857 if (filter->filter_info.src_port_mask == 0)
3858 mask &= ~TXGBE_5TFCTL0_MSPORT;
3859 if (filter->filter_info.dst_port_mask == 0)
3860 mask &= ~TXGBE_5TFCTL0_MDPORT;
3861 if (filter->filter_info.proto_mask == 0)
3862 mask &= ~TXGBE_5TFCTL0_MPROTO;
3864 ftqf |= TXGBE_5TFCTL0_MPOOL;
3865 ftqf |= TXGBE_5TFCTL0_ENA;
3867 wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
3868 wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
3869 wr32(hw, TXGBE_5TFPORT(i), sdpqf);
3870 wr32(hw, TXGBE_5TFCTL0(i), ftqf);
3872 l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
3873 wr32(hw, TXGBE_5TFCTL1(i), l34timir);
3877 * add a 5tuple filter
3880 * dev: Pointer to struct rte_eth_dev.
3881 * index: the index that is allocated to the filter.
3882 * filter: pointer to the filter that will be added.
3883 * rx_queue: the queue id that the filter is assigned to.
3886 * - On success, zero.
3887 * - On failure, a negative value.
3890 txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3891 struct txgbe_5tuple_filter *filter)
3893 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3897 * look for an unused 5tuple filter index,
3898 * and insert the filter into the list.
3900 for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
3901 idx = i / (sizeof(uint32_t) * NBBY);
3902 shift = i % (sizeof(uint32_t) * NBBY);
3903 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3904 filter_info->fivetuple_mask[idx] |= 1 << shift;
3906 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3912 if (i >= TXGBE_MAX_FTQF_FILTERS) {
3913 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3917 txgbe_inject_5tuple_filter(dev, filter);
3923 * remove a 5tuple filter
3926 * dev: Pointer to struct rte_eth_dev.
3927 * filter: pointer to the filter that will be removed.
3930 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
3931 struct txgbe_5tuple_filter *filter)
3933 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3934 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3935 uint16_t index = filter->index;
3937 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
3938 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
3939 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3942 wr32(hw, TXGBE_5TFDADDR(index), 0);
3943 wr32(hw, TXGBE_5TFSADDR(index), 0);
3944 wr32(hw, TXGBE_5TFPORT(index), 0);
3945 wr32(hw, TXGBE_5TFCTL0(index), 0);
3946 wr32(hw, TXGBE_5TFCTL1(index), 0);
3949 static inline struct txgbe_5tuple_filter *
3950 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
3951 struct txgbe_5tuple_filter_info *key)
3953 struct txgbe_5tuple_filter *it;
3955 TAILQ_FOREACH(it, filter_list, entries) {
3956 if (memcmp(key, &it->filter_info,
3957 sizeof(struct txgbe_5tuple_filter_info)) == 0) {
3964 /* translate elements in struct rte_eth_ntuple_filter
3965 * to struct txgbe_5tuple_filter_info
3968 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
3969 struct txgbe_5tuple_filter_info *filter_info)
3971 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
3972 filter->priority > TXGBE_5TUPLE_MAX_PRI ||
3973 filter->priority < TXGBE_5TUPLE_MIN_PRI)
3976 switch (filter->dst_ip_mask) {
3978 filter_info->dst_ip_mask = 0;
3979 filter_info->dst_ip = filter->dst_ip;
3982 filter_info->dst_ip_mask = 1;
3985 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3989 switch (filter->src_ip_mask) {
3991 filter_info->src_ip_mask = 0;
3992 filter_info->src_ip = filter->src_ip;
3995 filter_info->src_ip_mask = 1;
3998 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
4002 switch (filter->dst_port_mask) {
4004 filter_info->dst_port_mask = 0;
4005 filter_info->dst_port = filter->dst_port;
4008 filter_info->dst_port_mask = 1;
4011 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4015 switch (filter->src_port_mask) {
4017 filter_info->src_port_mask = 0;
4018 filter_info->src_port = filter->src_port;
4021 filter_info->src_port_mask = 1;
4024 PMD_DRV_LOG(ERR, "invalid src_port mask.");
4028 switch (filter->proto_mask) {
4030 filter_info->proto_mask = 0;
4031 filter_info->proto =
4032 convert_protocol_type(filter->proto);
4035 filter_info->proto_mask = 1;
4038 PMD_DRV_LOG(ERR, "invalid protocol mask.");
4042 filter_info->priority = (uint8_t)filter->priority;
4047 * add or delete a ntuple filter
4050 * dev: Pointer to struct rte_eth_dev.
4051 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4052 * add: if true, add filter, if false, remove filter
4055 * - On success, zero.
4056 * - On failure, a negative value.
4059 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
4060 struct rte_eth_ntuple_filter *ntuple_filter,
4063 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4064 struct txgbe_5tuple_filter_info filter_5tuple;
4065 struct txgbe_5tuple_filter *filter;
4068 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
4069 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
4073 memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
4074 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
4078 filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
4080 if (filter != NULL && add) {
4081 PMD_DRV_LOG(ERR, "filter exists.");
4084 if (filter == NULL && !add) {
4085 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4090 filter = rte_zmalloc("txgbe_5tuple_filter",
4091 sizeof(struct txgbe_5tuple_filter), 0);
4094 rte_memcpy(&filter->filter_info,
4096 sizeof(struct txgbe_5tuple_filter_info));
4097 filter->queue = ntuple_filter->queue;
4098 ret = txgbe_add_5tuple_filter(dev, filter);
4104 txgbe_remove_5tuple_filter(dev, filter);
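/*
 * Illustrative sketch (hypothetical application code): new applications
 * normally install 5-tuple rules through rte_flow rather than calling
 * this path directly; a pattern such as the one below is parsed by the
 * txgbe flow layer into the ntuple filter handled above. The ipv4/tcp
 * specs, masks and error handling are elided.
 *
 *   const struct rte_flow_attr attr = { .ingress = 1 };
 *   const struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip, .mask = &ip_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_TCP, .spec = &tcp, .mask = &tcp_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   const struct rte_flow_action_queue q = { .index = 1 };
 *   const struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 *   flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 */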
4111 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
4112 struct rte_eth_ethertype_filter *filter,
4115 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4116 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4120 struct txgbe_ethertype_filter ethertype_filter;
4122 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
4125 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
4126 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4127 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4128 " ethertype filter.", filter->ether_type);
4132 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4133 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4136 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4137 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4141 ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4142 if (ret >= 0 && add) {
4143 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4144 filter->ether_type);
4147 if (ret < 0 && !add) {
4148 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4149 filter->ether_type);
4154 etqf = TXGBE_ETFLT_ENA;
4155 etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
4156 etqs |= TXGBE_ETCLS_QPID(filter->queue);
4157 etqs |= TXGBE_ETCLS_QENA;
4159 ethertype_filter.ethertype = filter->ether_type;
4160 ethertype_filter.etqf = etqf;
4161 ethertype_filter.etqs = etqs;
4162 ethertype_filter.conf = FALSE;
4163 ret = txgbe_ethertype_filter_insert(filter_info,
4166 PMD_DRV_LOG(ERR, "ethertype filters are full.");
4170 ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4174 wr32(hw, TXGBE_ETFLT(ret), etqf);
4175 wr32(hw, TXGBE_ETCLS(ret), etqs);
4182 txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
4183 const struct rte_flow_ops **ops)
4185 *ops = &txgbe_flow_ops;
4190 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4191 u8 **mc_addr_ptr, u32 *vmdq)
4196 mc_addr = *mc_addr_ptr;
4197 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4202 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4203 struct rte_ether_addr *mc_addr_set,
4204 uint32_t nb_mc_addr)
4206 struct txgbe_hw *hw;
4209 hw = TXGBE_DEV_HW(dev);
4210 mc_addr_list = (u8 *)mc_addr_set;
4211 return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4212 txgbe_dev_addr_list_itr, TRUE);
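/*
 * Illustrative usage sketch (hypothetical application code): programming
 * two IPv4 multicast MAC addresses through the hook above.
 *
 *   struct rte_ether_addr mc[2] = {
 *       {{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }},
 *       {{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }},
 *   };
 *
 *   rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 */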
4216 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4218 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4219 uint64_t systime_cycles;
4221 systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4222 systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4224 return systime_cycles;
4228 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4230 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4231 uint64_t rx_tstamp_cycles;
4233 /* TSRXSTMPL and TSRXSTMPH hold the low and high 32 bits of the timestamp. */
4234 rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4235 rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4237 return rx_tstamp_cycles;
4241 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4243 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4244 uint64_t tx_tstamp_cycles;
4246 /* TSTXSTMPL and TSTXSTMPH hold the low and high 32 bits of the timestamp. */
4247 tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4248 tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4250 return tx_tstamp_cycles;
4254 txgbe_start_timecounters(struct rte_eth_dev *dev)
4256 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4257 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4258 struct rte_eth_link link;
4259 uint32_t incval = 0;
4262 /* Get current link speed. */
4263 txgbe_dev_link_update(dev, 1);
4264 rte_eth_linkstatus_get(dev, &link);
4266 switch (link.link_speed) {
4267 case RTE_ETH_SPEED_NUM_100M:
4268 incval = TXGBE_INCVAL_100;
4269 shift = TXGBE_INCVAL_SHIFT_100;
4271 case RTE_ETH_SPEED_NUM_1G:
4272 incval = TXGBE_INCVAL_1GB;
4273 shift = TXGBE_INCVAL_SHIFT_1GB;
4275 case RTE_ETH_SPEED_NUM_10G:
4277 incval = TXGBE_INCVAL_10GB;
4278 shift = TXGBE_INCVAL_SHIFT_10GB;
4282 wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
4284 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4285 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4286 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4288 adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4289 adapter->systime_tc.cc_shift = shift;
4290 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4292 adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4293 adapter->rx_tstamp_tc.cc_shift = shift;
4294 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4296 adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4297 adapter->tx_tstamp_tc.cc_shift = shift;
4298 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
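/*
 * Annotation (added): incval/shift above are chosen per negotiated link
 * speed, so the cyclecounters only stay accurate for the speed sampled
 * by txgbe_dev_link_update() at the top of this function. The three
 * timecounters are zeroed here so that systime and Rx/Tx timestamps all
 * restart from a common origin whenever the link renegotiates.
 */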
4302 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4304 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4306 adapter->systime_tc.nsec += delta;
4307 adapter->rx_tstamp_tc.nsec += delta;
4308 adapter->tx_tstamp_tc.nsec += delta;
4314 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4317 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4319 ns = rte_timespec_to_ns(ts);
4320 /* Set the timecounters to a new value. */
4321 adapter->systime_tc.nsec = ns;
4322 adapter->rx_tstamp_tc.nsec = ns;
4323 adapter->tx_tstamp_tc.nsec = ns;
4329 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4331 uint64_t ns, systime_cycles;
4332 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4334 systime_cycles = txgbe_read_systime_cyclecounter(dev);
4335 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4336 *ts = rte_ns_to_timespec(ns);
4342 txgbe_timesync_enable(struct rte_eth_dev *dev)
4344 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4347 /* Stop the timesync system time. */
4348 wr32(hw, TXGBE_TSTIMEINC, 0x0);
4349 /* Reset the timesync system time value. */
4350 wr32(hw, TXGBE_TSTIMEL, 0x0);
4351 wr32(hw, TXGBE_TSTIMEH, 0x0);
4353 txgbe_start_timecounters(dev);
4355 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4356 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4357 RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4359 /* Enable timestamping of received PTP packets. */
4360 tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4361 tsync_ctl |= TXGBE_TSRXCTL_ENA;
4362 wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4364 /* Enable timestamping of transmitted PTP packets. */
4365 tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4366 tsync_ctl |= TXGBE_TSTXCTL_ENA;
4367 wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
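/*
 * Illustrative usage sketch (hypothetical application code): the
 * enable/disable/read handlers in this block back the generic ethdev
 * timesync API. The PTP message exchange itself and error handling are
 * elided.
 *
 *   struct timespec ts;
 *
 *   rte_eth_timesync_enable(port_id);
 *   ... receive a PTP event frame on queue 0 ...
 *   if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *       printf("rx stamped at %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 */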
4375 txgbe_timesync_disable(struct rte_eth_dev *dev)
4377 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4380 /* Disable timestamping of transmitted PTP packets. */
4381 tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4382 tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
4383 wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4385 /* Disable timestamping of received PTP packets. */
4386 tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4387 tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
4388 wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4390 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4391 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
4393 /* Stop incrementing the System Time registers. */
4394 wr32(hw, TXGBE_TSTIMEINC, 0);
4400 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4401 struct timespec *timestamp,
4402 uint32_t flags __rte_unused)
4404 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4405 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4406 uint32_t tsync_rxctl;
4407 uint64_t rx_tstamp_cycles;
4410 tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4411 if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4414 rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4415 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4416 *timestamp = rte_ns_to_timespec(ns);
4422 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4423 struct timespec *timestamp)
4425 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4426 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4427 uint32_t tsync_txctl;
4428 uint64_t tx_tstamp_cycles;
4431 tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4432 if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4435 tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4436 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4437 *timestamp = rte_ns_to_timespec(ns);
4443 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4447 const struct reg_info *reg_group;
4448 const struct reg_info **reg_set = txgbe_regs_others;
4450 while ((reg_group = reg_set[g_ind++]))
4451 count += txgbe_regs_group_count(reg_group);
4457 txgbe_get_regs(struct rte_eth_dev *dev,
4458 struct rte_dev_reg_info *regs)
4460 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4461 uint32_t *data = regs->data;
4464 const struct reg_info *reg_group;
4465 const struct reg_info **reg_set = txgbe_regs_others;
4468 regs->length = txgbe_get_reg_length(dev);
4469 regs->width = sizeof(uint32_t);
4473 /* Support only full register dump */
4474 if (regs->length == 0 ||
4475 regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
4476 regs->version = hw->mac.type << 24 |
4477 hw->revision_id << 16 |
4479 while ((reg_group = reg_set[g_ind++]))
4480 count += txgbe_read_regs_group(dev, &data[count],
4489 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
4491 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4493 /* Return unit is byte count */
4494 return hw->rom.word_size * 2;

static int
txgbe_get_eeprom(struct rte_eth_dev *dev,
		 struct rte_dev_eeprom_info *in_eeprom)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rom_info *eeprom = &hw->rom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if (first > hw->rom.word_size ||
	    ((first + length) > hw->rom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->readw_buffer(hw, first, length, data);
}

static int
txgbe_set_eeprom(struct rte_eth_dev *dev,
		 struct rte_dev_eeprom_info *in_eeprom)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rom_info *eeprom = &hw->rom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if (first > hw->rom.word_size ||
	    ((first + length) > hw->rom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->writew_buffer(hw, first, length, data);
}
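
/*
 * Note on units: the caller passes offset/length in bytes; both are halved
 * (">> 1") above because the ROM is word (16-bit) addressed, which is also
 * why get_eeprom_length() reports word_size * 2 bytes. Illustrative read
 * (application side; buffer size is a placeholder):
 *
 *	uint16_t buf[64];
 *	struct rte_dev_eeprom_info ei = {
 *		.data = buf, .offset = 0, .length = sizeof(buf),
 *	};
 *
 *	rte_eth_dev_get_eeprom(port_id, &ei);
 */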

static int
txgbe_get_module_info(struct rte_eth_dev *dev,
		      struct rte_eth_dev_module_info *modinfo)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t status;
	uint8_t sff8472_rev, addr_mode;
	bool page_swap = false;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.read_i2c_eeprom(hw,
					 TXGBE_SFF_SFF_8472_COMP,
					 &sff8472_rev);
	if (status != 0)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.read_i2c_eeprom(hw,
					 TXGBE_SFF_SFF_8472_SWAP,
					 &addr_mode);
	if (status != 0)
		return -EIO;

	if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
		PMD_DRV_LOG(ERR,
			    "Address change required to access page 0xA2, "
			    "but not supported. Please report the module "
			    "type to the driver maintainers.");
		page_swap = true;
	}

	if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = RTE_ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
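
/*
 * Summary of the probe above: a module that either reports itself as
 * SFF-8472-unsupported or would need an address-mode change (which this
 * driver does not implement) is exposed as plain SFF-8079, i.e. only the
 * 256-byte lower page; otherwise the full SFF-8472 layout, including the
 * diagnostics page, is advertised to the caller.
 */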

static int
txgbe_get_module_eeprom(struct rte_eth_dev *dev,
			struct rte_dev_eeprom_info *info)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
	uint8_t databyte = 0xFF;
	uint8_t *data = info->data;
	uint32_t i = 0;

	if (info->length == 0)
		return -EINVAL;

	for (i = info->offset; i < info->offset + info->length; i++) {
		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.read_i2c_sff8472(hw, i, &databyte);

		if (status != 0)
			return -EIO;

		data[i - info->offset] = databyte;
	}

	return 0;
}

bool
txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
{
	switch (mac_type) {
	case txgbe_mac_raptor:
	case txgbe_mac_raptor_vf:
		return 1;
	default:
		return 0;
	}
}

static int
txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
		       struct rte_eth_dcb_info *dcb_info)
{
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
	struct txgbe_dcb_tc_config *tc;
	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
	uint8_t nb_tcs;
	uint8_t i, j;

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;

	tc_queue = &dcb_info->tc_queue;
	nb_tcs = dcb_info->nb_tcs;

	if (dcb_config->vt_mode) { /* vt is enabled */
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
			for (j = 0; j < nb_tcs; j++) {
				tc_queue->tc_rxq[0][j].base = j;
				tc_queue->tc_rxq[0][j].nb_queue = 1;
				tc_queue->tc_txq[0][j].base = j;
				tc_queue->tc_txq[0][j].nb_queue = 1;
			}
		} else {
			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
				for (j = 0; j < nb_tcs; j++) {
					tc_queue->tc_rxq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_rxq[i][j].nb_queue = 1;
					tc_queue->tc_txq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_txq[i][j].nb_queue = 1;
				}
			}
		}
	} else { /* vt is disabled */
		struct rte_eth_dcb_rx_conf *rx_conf =
				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 64;
			dcb_info->tc_queue.tc_txq[0][2].base = 96;
			dcb_info->tc_queue.tc_txq[0][3].base = 112;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 32;
			dcb_info->tc_queue.tc_txq[0][2].base = 64;
			dcb_info->tc_queue.tc_txq[0][3].base = 80;
			dcb_info->tc_queue.tc_txq[0][4].base = 96;
			dcb_info->tc_queue.tc_txq[0][5].base = 104;
			dcb_info->tc_queue.tc_txq[0][6].base = 112;
			dcb_info->tc_queue.tc_txq[0][7].base = 120;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
		}
	}
	for (i = 0; i < dcb_info->nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
	}

	return 0;
}
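
/*
 * For reference, the fixed queue layout reported above when VT is off:
 * RX uses 16 queues per TC (stride 32 with 4 TCs, 16 with 8 TCs), while
 * TX is weighted toward the lower TCs:
 *
 *	4 TCs: TC0 64q@0,  TC1 32q@64, TC2 16q@96, TC3 16q@112
 *	8 TCs: TC0 32q@0,  TC1 32q@32, TC2 16q@64, TC3 16q@80,
 *	       TC4  8q@96, TC5 8q@104, TC6 8q@112, TC7 8q@120
 */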

/* Update E-tag ether type */
static int
txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
			    uint16_t ether_type)
{
	uint32_t etag_etype;

	etag_etype = rd32(hw, TXGBE_EXTAG);
	etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
	etag_etype |= ether_type;
	wr32(hw, TXGBE_EXTAG, etag_etype);

	return 0;
}

/* Enable E-tag tunnel */
static int
txgbe_e_tag_enable(struct txgbe_hw *hw)
{
	uint32_t etag_etype;

	etag_etype = rd32(hw, TXGBE_PORTCTL);
	etag_etype |= TXGBE_PORTCTL_ETAG;
	wr32(hw, TXGBE_PORTCTL, etag_etype);

	return 0;
}

static int
txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
		       struct txgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	rar_entries = hw->mac.num_rar_entries;

	for (i = 1; i < rar_entries; i++) {
		wr32(hw, TXGBE_ETHADDRIDX, i);
		rar_high = rd32(hw, TXGBE_ETHADDRH);
		rar_low = rd32(hw, TXGBE_ETHADDRL);
		if ((rar_high & TXGBE_ETHADDRH_VLD) &&
		    (rar_high & TXGBE_ETHADDRH_ETAG) &&
		    (TXGBE_ETHADDRL_ETAG(rar_low) ==
		     l2_tunnel->tunnel_id)) {
			wr32(hw, TXGBE_ETHADDRL, 0);
			wr32(hw, TXGBE_ETHADDRH, 0);

			txgbe_clear_vmdq(hw, i, BIT_MASK32);

			return ret;
		}
	}

	return ret;
}

static int
txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
		       struct txgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	/* One entry for one tunnel. Try to remove potential existing entry. */
	txgbe_e_tag_filter_del(dev, l2_tunnel);

	rar_entries = hw->mac.num_rar_entries;

	for (i = 1; i < rar_entries; i++) {
		wr32(hw, TXGBE_ETHADDRIDX, i);
		rar_high = rd32(hw, TXGBE_ETHADDRH);
		if (rar_high & TXGBE_ETHADDRH_VLD) {
			continue;
		} else {
			txgbe_set_vmdq(hw, i, l2_tunnel->pool);
			rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
			rar_low = l2_tunnel->tunnel_id;

			wr32(hw, TXGBE_ETHADDRL, rar_low);
			wr32(hw, TXGBE_ETHADDRH, rar_high);

			return ret;
		}
	}

	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
		     " Please remove a rule before adding a new one.");
	return -EINVAL;
}

static inline struct txgbe_l2_tn_filter *
txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
			  struct txgbe_l2_tn_key *key)
{
	int ret;

	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
	if (ret < 0)
		return NULL;

	return l2_tn_info->hash_map[ret];
}

static inline int
txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
			  struct txgbe_l2_tn_filter *l2_tn_filter)
{
	int ret;

	ret = rte_hash_add_key(l2_tn_info->hash_handle,
			       &l2_tn_filter->key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert L2 tunnel filter"
			    " to hash table %d!",
			    ret);
		return ret;
	}

	l2_tn_info->hash_map[ret] = l2_tn_filter;

	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);

	return 0;
}

static inline int
txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
			  struct txgbe_l2_tn_key *key)
{
	int ret;
	struct txgbe_l2_tn_filter *l2_tn_filter;

	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);

	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "No such L2 tunnel filter to delete %d!",
			    ret);
		return ret;
	}

	l2_tn_filter = l2_tn_info->hash_map[ret];
	l2_tn_info->hash_map[ret] = NULL;

	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
	rte_free(l2_tn_filter);

	return 0;
}
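
/*
 * Bookkeeping note: the L2 tunnel filters are tracked twice on purpose.
 * The rte_hash maps a filter key to a slot index for O(1) lookup, insert
 * and delete (above), while the TAILQ keeps the same filters iterable for
 * the restore and bulk-clear paths later in this file.
 */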

/* Add l2 tunnel filter */
static int
txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel,
			       bool restore)
{
	int ret;
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_key key;
	struct txgbe_l2_tn_filter *node;

	if (!restore) {
		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
		key.tn_id = l2_tunnel->tunnel_id;

		node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);

		if (node) {
			PMD_DRV_LOG(ERR,
				    "The L2 tunnel filter already exists!");
			return -EINVAL;
		}

		node = rte_zmalloc("txgbe_l2_tn",
				   sizeof(struct txgbe_l2_tn_filter),
				   0);
		if (!node)
			return -ENOMEM;

		rte_memcpy(&node->key,
			   &key,
			   sizeof(struct txgbe_l2_tn_key));
		node->pool = l2_tunnel->pool;
		ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
		if (ret < 0) {
			rte_free(node);
			return ret;
		}
	}

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	if (!restore && ret < 0)
		(void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);

	return ret;
}

/* Delete l2 tunnel filter */
static int
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel)
{
	int ret;
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_key key;

	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
	key.tn_id = l2_tunnel->tunnel_id;
	ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
	if (ret < 0)
		return ret;

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
{
	int ret = 0;
	uint32_t ctrl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	ctrl = rd32(hw, TXGBE_POOLCTL);
	ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
	if (en)
		ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
	wr32(hw, TXGBE_POOLCTL, ctrl);

	return ret;
}

/* Add UDP tunneling port */
static int
txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			      struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
		break;
	case RTE_ETH_TUNNEL_TYPE_GENEVE:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Adding Geneve port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
		break;
	case RTE_ETH_TUNNEL_TYPE_TEREDO:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Adding Teredo port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
		break;
	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Adding VxLAN-GPE port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	txgbe_flush(hw);

	return ret;
}
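
/*
 * Illustrative application-side call (values are placeholders). Note that
 * each tunnel type is backed by a single port register above, so adding a
 * new port simply replaces the previous one:
 *
 *	struct rte_eth_udp_tunnel tnl = {
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *		.udp_port = 4790,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
 */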

/* Remove UDP tunneling port */
static int
txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			      struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;
	uint16_t cur_port;

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
		if (cur_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Port %u does not exist.",
				    udp_tunnel->udp_port);
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_VXLANPORT, 0);
		break;
	case RTE_ETH_TUNNEL_TYPE_GENEVE:
		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
		if (cur_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Port %u does not exist.",
				    udp_tunnel->udp_port);
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_GENEVEPORT, 0);
		break;
	case RTE_ETH_TUNNEL_TYPE_TEREDO:
		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
		if (cur_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Port %u does not exist.",
				    udp_tunnel->udp_port);
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_TEREDOPORT, 0);
		break;
	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
		if (cur_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Port %u does not exist.",
				    udp_tunnel->udp_port);
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_VXLANPORTGPE, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	txgbe_flush(hw);

	return ret;
}

/* restore n-tuple filter */
static inline void
txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	struct txgbe_5tuple_filter *node;

	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
		txgbe_inject_5tuple_filter(dev, node);
	}
}

/* restore ethernet type filter */
static inline void
txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			wr32(hw, TXGBE_ETFLT(i),
			     filter_info->ethertype_filters[i].etqf);
			wr32(hw, TXGBE_ETCLS(i),
			     filter_info->ethertype_filters[i].etqs);
			txgbe_flush(hw);
		}
	}
}

/* restore SYN filter */
static inline void
txgbe_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & TXGBE_SYNCLS_ENA) {
		wr32(hw, TXGBE_SYNCLS, synqf);
		txgbe_flush(hw);
	}
}

/* restore L2 tunnel filter */
static inline void
txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_filter *node;
	struct txgbe_l2_tunnel_conf l2_tn_conf;

	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
		l2_tn_conf.tunnel_id = node->key.tn_id;
		l2_tn_conf.pool = node->pool;
		(void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
	}
}

/* restore rss filter */
static inline void
txgbe_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);

	if (filter_info->rss_info.conf.queue_num)
		txgbe_config_rss_filter(dev,
			&filter_info->rss_info, TRUE);
}

static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
	txgbe_ntuple_filter_restore(dev);
	txgbe_ethertype_filter_restore(dev);
	txgbe_syn_filter_restore(dev);
	txgbe_fdir_filter_restore(dev);
	txgbe_l2_tn_filter_restore(dev);
	txgbe_rss_filter_restore(dev);

	return 0;
}
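
/*
 * The restore helpers above re-program the hardware from the driver's
 * software state (the TAILQ shadow lists and filter_info arrays);
 * txgbe_filter_restore() is intended for the start/reset path, where the
 * register state has been cleared but the logical filter set must survive.
 */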

static void
txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (l2_tn_info->e_tag_en)
		(void)txgbe_e_tag_enable(hw);

	if (l2_tn_info->e_tag_fwd_en)
		(void)txgbe_e_tag_forwarding_en_dis(dev, 1);

	(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}

/* remove all the n-tuple filters */
void
txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	struct txgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		txgbe_remove_5tuple_filter(dev, p_5tuple);
}

/* remove all the ether type filters */
void
txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i) &&
		    !filter_info->ethertype_filters[i].conf) {
			(void)txgbe_ethertype_filter_remove(filter_info,
							    (uint8_t)i);
			wr32(hw, TXGBE_ETFLT(i), 0);
			wr32(hw, TXGBE_ETCLS(i), 0);
		}
	}
}

/* remove the SYN filter */
void
txgbe_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);

	if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
		filter_info->syn_info = 0;

		wr32(hw, TXGBE_SYNCLS, 0);
		txgbe_flush(hw);
	}
}

/* remove all the L2 tunnel filters */
int
txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_filter *l2_tn_filter;
	struct txgbe_l2_tunnel_conf l2_tn_conf;
	int ret = 0;

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
		l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
		l2_tn_conf.pool = l2_tn_filter->pool;
		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure              = txgbe_dev_configure,
	.dev_infos_get              = txgbe_dev_info_get,
	.dev_start                  = txgbe_dev_start,
	.dev_stop                   = txgbe_dev_stop,
	.dev_set_link_up            = txgbe_dev_set_link_up,
	.dev_set_link_down          = txgbe_dev_set_link_down,
	.dev_close                  = txgbe_dev_close,
	.dev_reset                  = txgbe_dev_reset,
	.promiscuous_enable         = txgbe_dev_promiscuous_enable,
	.promiscuous_disable        = txgbe_dev_promiscuous_disable,
	.allmulticast_enable        = txgbe_dev_allmulticast_enable,
	.allmulticast_disable       = txgbe_dev_allmulticast_disable,
	.link_update                = txgbe_dev_link_update,
	.stats_get                  = txgbe_dev_stats_get,
	.xstats_get                 = txgbe_dev_xstats_get,
	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
	.stats_reset                = txgbe_dev_stats_reset,
	.xstats_reset               = txgbe_dev_xstats_reset,
	.xstats_get_names           = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
	.fw_version_get             = txgbe_fw_version_get,
	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
	.mtu_set                    = txgbe_dev_mtu_set,
	.vlan_filter_set            = txgbe_vlan_filter_set,
	.vlan_tpid_set              = txgbe_vlan_tpid_set,
	.vlan_offload_set           = txgbe_vlan_offload_set,
	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
	.rx_queue_start             = txgbe_dev_rx_queue_start,
	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
	.tx_queue_start             = txgbe_dev_tx_queue_start,
	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release           = txgbe_dev_rx_queue_release,
	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
	.tx_queue_release           = txgbe_dev_tx_queue_release,
	.dev_led_on                 = txgbe_dev_led_on,
	.dev_led_off                = txgbe_dev_led_off,
	.flow_ctrl_get              = txgbe_flow_ctrl_get,
	.flow_ctrl_set              = txgbe_flow_ctrl_set,
	.priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
	.mac_addr_add               = txgbe_add_rar,
	.mac_addr_remove            = txgbe_remove_rar,
	.mac_addr_set               = txgbe_set_default_mac_addr,
	.uc_hash_table_set          = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
	.reta_update                = txgbe_dev_rss_reta_update,
	.reta_query                 = txgbe_dev_rss_reta_query,
	.rss_hash_update            = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
	.flow_ops_get               = txgbe_dev_flow_ops_get,
	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
	.rxq_info_get               = txgbe_rxq_info_get,
	.txq_info_get               = txgbe_txq_info_get,
	.timesync_enable            = txgbe_timesync_enable,
	.timesync_disable           = txgbe_timesync_disable,
	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
	.get_reg                    = txgbe_get_regs,
	.get_eeprom_length          = txgbe_get_eeprom_length,
	.get_eeprom                 = txgbe_get_eeprom,
	.set_eeprom                 = txgbe_set_eeprom,
	.get_module_info            = txgbe_get_module_info,
	.get_module_eeprom          = txgbe_get_module_eeprom,
	.get_dcb_info               = txgbe_dev_get_dcb_info,
	.timesync_adjust_time       = txgbe_timesync_adjust_time,
	.timesync_read_time         = txgbe_timesync_read_time,
	.timesync_write_time        = txgbe_timesync_write_time,
	.udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
	.tm_ops_get                 = txgbe_tm_ops_get,
	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
};
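
/*
 * This table is what connects the generic rte_ethdev API to the callbacks
 * in this file: it is installed on the port (dev->dev_ops) during device
 * initialization in the probe path, so e.g. rte_eth_timesync_enable()
 * resolves to txgbe_timesync_enable() above.
 */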

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
			      TXGBE_DEVARG_BP_AUTO "=<0|1>"
			      TXGBE_DEVARG_KR_POLL "=<0|1>"
			      TXGBE_DEVARG_KR_PRESENT "=<0|1>"
			      TXGBE_DEVARG_KX_SGMII "=<0|1>"
			      TXGBE_DEVARG_FFE_SET "=<0-4>"
			      TXGBE_DEVARG_FFE_MAIN "=<uint16>"
			      TXGBE_DEVARG_FFE_PRE "=<uint16>"
			      TXGBE_DEVARG_FFE_POST "=<uint16>");
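
/*
 * Devargs usage sketch: the keys are the TXGBE_DEVARG_* string macros from
 * txgbe_ethdev.h, passed as comma-separated key=value pairs after the PCI
 * address. Assuming TXGBE_DEVARG_BP_AUTO and TXGBE_DEVARG_KR_POLL expand
 * to "auto_neg" and "poll" (BDF and values below are placeholders):
 *
 *	dpdk-testpmd -a 0000:01:00.0,auto_neg=1,poll=0 -- -i
 */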

RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_driver, driver, NOTICE);
RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_bp, bp, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx, tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx_free, tx_free, DEBUG);
#endif