1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_interrupts.h>
14 #include <rte_debug.h>
16 #include <rte_memory.h>
18 #include <rte_alarm.h>
20 #include "txgbe_logs.h"
21 #include "base/txgbe.h"
22 #include "txgbe_ethdev.h"
23 #include "txgbe_rxtx.h"
25 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
26 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
27 static int txgbe_dev_close(struct rte_eth_dev *dev);
28 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
29 int wait_to_complete);
30 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
31 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
32 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
35 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
36 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
37 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
38 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
39 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
40 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
41 struct rte_intr_handle *handle);
42 static void txgbe_dev_interrupt_handler(void *param);
43 static void txgbe_dev_interrupt_delayed_handler(void *param);
44 static void txgbe_configure_msix(struct rte_eth_dev *dev);
46 #define TXGBE_SET_HWSTRIP(h, q) do {\
47 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
48 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
49 (h)->bitmap[idx] |= 1 << bit;\
52 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
53 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
54 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
55 (h)->bitmap[idx] &= ~(1 << bit);\
58 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
59 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
60 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
61 (r) = (h)->bitmap[idx] >> bit & 1;\
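/*
 * These macros keep one bit per Rx queue in the per-port hwstrip bitmap.
 * Assuming 32-bit bitmap words (sizeof(bitmap[0]) * NBBY == 32), queue 35
 * maps to word idx = 35 / 32 = 1 and bit = 35 % 32 = 3, i.e. bit 3 of
 * bitmap[1].
 */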
65 * The set of PCI devices this driver supports
67 static const struct rte_pci_id pci_id_txgbe_map[] = {
68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
70 { .vendor_id = 0, /* sentinel */ },
73 static const struct rte_eth_desc_lim rx_desc_lim = {
74 .nb_max = TXGBE_RING_DESC_MAX,
75 .nb_min = TXGBE_RING_DESC_MIN,
76 .nb_align = TXGBE_RXD_ALIGN,
79 static const struct rte_eth_desc_lim tx_desc_lim = {
80 .nb_max = TXGBE_RING_DESC_MAX,
81 .nb_min = TXGBE_RING_DESC_MIN,
82 .nb_align = TXGBE_TXD_ALIGN,
83 .nb_seg_max = TXGBE_TX_MAX_SEG,
84 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
87 static const struct eth_dev_ops txgbe_eth_dev_ops;
89 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
90 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
91 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
93 HW_XSTAT(mng_bmc2host_packets),
94 HW_XSTAT(mng_host2bmc_packets),
100 HW_XSTAT(rx_total_bytes),
101 HW_XSTAT(rx_total_packets),
102 HW_XSTAT(tx_total_packets),
103 HW_XSTAT(rx_total_missed_packets),
104 HW_XSTAT(rx_broadcast_packets),
105 HW_XSTAT(rx_multicast_packets),
106 HW_XSTAT(rx_management_packets),
107 HW_XSTAT(tx_management_packets),
108 HW_XSTAT(rx_management_dropped),
111 HW_XSTAT(rx_crc_errors),
112 HW_XSTAT(rx_illegal_byte_errors),
113 HW_XSTAT(rx_error_bytes),
114 HW_XSTAT(rx_mac_short_packet_dropped),
115 HW_XSTAT(rx_length_errors),
116 HW_XSTAT(rx_undersize_errors),
117 HW_XSTAT(rx_fragment_errors),
118 HW_XSTAT(rx_oversize_errors),
119 HW_XSTAT(rx_jabber_errors),
120 HW_XSTAT(rx_l3_l4_xsum_error),
121 HW_XSTAT(mac_local_errors),
122 HW_XSTAT(mac_remote_errors),
125 HW_XSTAT(flow_director_added_filters),
126 HW_XSTAT(flow_director_removed_filters),
127 HW_XSTAT(flow_director_filter_add_errors),
128 HW_XSTAT(flow_director_filter_remove_errors),
129 HW_XSTAT(flow_director_matched_filters),
130 HW_XSTAT(flow_director_missed_filters),
133 HW_XSTAT(rx_fcoe_crc_errors),
134 HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
135 HW_XSTAT(rx_fcoe_dropped),
136 HW_XSTAT(rx_fcoe_packets),
137 HW_XSTAT(tx_fcoe_packets),
138 HW_XSTAT(rx_fcoe_bytes),
139 HW_XSTAT(tx_fcoe_bytes),
140 HW_XSTAT(rx_fcoe_no_ddp),
141 HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
144 HW_XSTAT(tx_macsec_pkts_untagged),
145 HW_XSTAT(tx_macsec_pkts_encrypted),
146 HW_XSTAT(tx_macsec_pkts_protected),
147 HW_XSTAT(tx_macsec_octets_encrypted),
148 HW_XSTAT(tx_macsec_octets_protected),
149 HW_XSTAT(rx_macsec_pkts_untagged),
150 HW_XSTAT(rx_macsec_pkts_badtag),
151 HW_XSTAT(rx_macsec_pkts_nosci),
152 HW_XSTAT(rx_macsec_pkts_unknownsci),
153 HW_XSTAT(rx_macsec_octets_decrypted),
154 HW_XSTAT(rx_macsec_octets_validated),
155 HW_XSTAT(rx_macsec_sc_pkts_unchecked),
156 HW_XSTAT(rx_macsec_sc_pkts_delayed),
157 HW_XSTAT(rx_macsec_sc_pkts_late),
158 HW_XSTAT(rx_macsec_sa_pkts_ok),
159 HW_XSTAT(rx_macsec_sa_pkts_invalid),
160 HW_XSTAT(rx_macsec_sa_pkts_notvalid),
161 HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
162 HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
165 HW_XSTAT(rx_size_64_packets),
166 HW_XSTAT(rx_size_65_to_127_packets),
167 HW_XSTAT(rx_size_128_to_255_packets),
168 HW_XSTAT(rx_size_256_to_511_packets),
169 HW_XSTAT(rx_size_512_to_1023_packets),
170 HW_XSTAT(rx_size_1024_to_max_packets),
171 HW_XSTAT(tx_size_64_packets),
172 HW_XSTAT(tx_size_65_to_127_packets),
173 HW_XSTAT(tx_size_128_to_255_packets),
174 HW_XSTAT(tx_size_256_to_511_packets),
175 HW_XSTAT(tx_size_512_to_1023_packets),
176 HW_XSTAT(tx_size_1024_to_max_packets),
179 HW_XSTAT(tx_xon_packets),
180 HW_XSTAT(rx_xon_packets),
181 HW_XSTAT(tx_xoff_packets),
182 HW_XSTAT(rx_xoff_packets),
184 HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
185 HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
186 HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
187 HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
190 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
191 sizeof(rte_txgbe_stats_strings[0]))
193 /* Per-priority statistics */
194 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
195 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
196 UP_XSTAT(rx_up_packets),
197 UP_XSTAT(tx_up_packets),
198 UP_XSTAT(rx_up_bytes),
199 UP_XSTAT(tx_up_bytes),
200 UP_XSTAT(rx_up_drop_packets),
202 UP_XSTAT(tx_up_xon_packets),
203 UP_XSTAT(rx_up_xon_packets),
204 UP_XSTAT(tx_up_xoff_packets),
205 UP_XSTAT(rx_up_xoff_packets),
206 UP_XSTAT(rx_up_dropped),
207 UP_XSTAT(rx_up_mbuf_alloc_errors),
208 UP_XSTAT(tx_up_xon2off_packets),
211 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
212 sizeof(rte_txgbe_up_strings[0]))
214 /* Per-queue statistics */
215 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
216 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
217 QP_XSTAT(rx_qp_packets),
218 QP_XSTAT(tx_qp_packets),
219 QP_XSTAT(rx_qp_bytes),
220 QP_XSTAT(tx_qp_bytes),
221 QP_XSTAT(rx_qp_mc_packets),
224 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
225 sizeof(rte_txgbe_qp_strings[0]))
228 txgbe_is_sfp(struct txgbe_hw *hw)
230 switch (hw->phy.type) {
231 case txgbe_phy_sfp_avago:
232 case txgbe_phy_sfp_ftl:
233 case txgbe_phy_sfp_intel:
234 case txgbe_phy_sfp_unknown:
235 case txgbe_phy_sfp_tyco_passive:
236 case txgbe_phy_sfp_unknown_passive:
243 static inline int32_t
244 txgbe_pf_reset_hw(struct txgbe_hw *hw)
249 status = hw->mac.reset_hw(hw);
251 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
252 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
253 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
254 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
257 if (status == TXGBE_ERR_SFP_NOT_PRESENT)
263 txgbe_enable_intr(struct rte_eth_dev *dev)
265 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
266 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
268 wr32(hw, TXGBE_IENMISC, intr->mask_misc);
269 wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
270 wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
275 txgbe_disable_intr(struct txgbe_hw *hw)
277 PMD_INIT_FUNC_TRACE();
279 wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
280 wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
281 wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
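/*
 * Note the asymmetry with txgbe_enable_intr() above: in the usual IMS/IMC
 * convention, writing IMC (interrupt mask clear) unmasks vectors while
 * writing IMS (interrupt mask set) masks them, so presumably the same
 * TXGBE_IMC_MASK bit pattern enables interrupts in one path and disables
 * them in the other.
 */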
286 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
291 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
292 struct txgbe_stat_mappings *stat_mappings =
293 TXGBE_DEV_STAT_MAPPINGS(eth_dev);
294 uint32_t qsmr_mask = 0;
295 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
299 if (hw->mac.type != txgbe_mac_raptor)
302 if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
305 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
306 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
309 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
310 if (n >= TXGBE_NB_STAT_MAPPING) {
311 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
314 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
316 /* Now clear any previously set stat_idx */
317 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
319 stat_mappings->tqsm[n] &= ~clearing_mask;
321 stat_mappings->rqsm[n] &= ~clearing_mask;
323 q_map = (uint32_t)stat_idx;
324 q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
325 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
327 stat_mappings->tqsm[n] |= qsmr_mask;
329 stat_mappings->rqsm[n] |= qsmr_mask;
331 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
332 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
334 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
335 is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
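/*
 * Worked example, assuming 4 QMAP fields of 8 bits each per 32-bit QSM
 * register (NB_QMAP_FIELDS_PER_QSM_REG == 4, QSM_REG_NB_BITS_PER_QMAP_FIELD
 * == 8): queue_id = 5, stat_idx = 3 gives n = 5 / 4 = 1 and offset =
 * 5 % 4 = 1, so bits 8..15 of RQSMR[1]/TQSM[1] are cleared and then set
 * to 3.
 */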
340 txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
344 struct txgbe_dcb_tc_config *tc;
346 UNREFERENCED_PARAMETER(hw);
348 dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
349 dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
350 bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
351 for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
352 tc = &dcb_config->tc_config[i];
353 tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
354 tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
355 tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
356 tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
357 tc->pfc = txgbe_dcb_pfc_disabled;
360 /* Initialize default user to priority mapping, UPx->TC0 */
361 tc = &dcb_config->tc_config[0];
362 tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
363 tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
364 for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
365 dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
366 dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
368 dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
369 dcb_config->pfc_mode_enable = false;
370 dcb_config->vt_mode = true;
371 dcb_config->round_robin_enable = false;
372 /* support all DCB capabilities */
373 dcb_config->support.capabilities = 0xFF;
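/*
 * Assuming TXGBE_DCB_TC_MAX is 8, the bandwidth split above works out to
 * bwgp = 100 / 8 = 12 percent per TC, with the (i & 1) term adding one
 * percent to each odd TC: 12 + 13 + 12 + 13 + 12 + 13 + 12 + 13 = 100.
 */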
377 * Ensure that all locks are released before first NVM or PHY access
380 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
385 * These ones are more tricky since they are common to all ports; but
386 * swfw_sync retries last long enough (1s) to be almost sure that if
387 * the lock cannot be taken it is due to an improper lock of the
390 mask = TXGBE_MNGSEM_SWPHY |
392 TXGBE_MNGSEM_SWFLASH;
393 if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
394 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
396 hw->mac.release_swfw_sync(hw, mask);
400 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
402 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
403 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
404 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
405 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
406 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
407 struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
408 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
409 const struct rte_memzone *mz;
414 PMD_INIT_FUNC_TRACE();
416 eth_dev->dev_ops = &txgbe_eth_dev_ops;
417 eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
418 eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
419 eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
422 * For secondary processes, we don't initialise any further as primary
423 * has already done this work. Only check we don't need a different
424 * RX and TX function.
426 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
427 struct txgbe_tx_queue *txq;
428 /* TX queue function in primary, set by the last queue initialized
429 * Tx queues may not have been initialized by the primary process
431 if (eth_dev->data->tx_queues) {
432 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
433 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
434 txgbe_set_tx_function(eth_dev, txq);
436 /* Use default TX function if we get here */
437 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
438 "Using default TX function.");
441 txgbe_set_rx_function(eth_dev);
446 rte_eth_copy_pci_info(eth_dev, pci_dev);
448 /* Vendor and Device ID need to be set before init of shared code */
449 hw->device_id = pci_dev->id.device_id;
450 hw->vendor_id = pci_dev->id.vendor_id;
451 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
452 hw->allow_unsupported_sfp = 1;
454 /* Reserve memory for interrupt status block */
455 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
456 16, TXGBE_ALIGN, SOCKET_ID_ANY);
460 hw->isb_dma = TMZ_PADDR(mz);
461 hw->isb_mem = TMZ_VADDR(mz);
463 /* Initialize the shared code (base driver) */
464 err = txgbe_init_shared_code(hw);
466 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
470 /* Unlock any pending hardware semaphore */
471 txgbe_swfw_lock_reset(hw);
473 /* Initialize DCB configuration */
474 memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
475 txgbe_dcb_init(hw, dcb_config);
477 /* Get Hardware Flow Control setting */
478 hw->fc.requested_mode = txgbe_fc_full;
479 hw->fc.current_mode = txgbe_fc_full;
480 hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
481 for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
482 hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
483 hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
487 err = hw->rom.init_params(hw);
489 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
493 /* Make sure we have a good EEPROM before we read from it */
494 err = hw->rom.validate_checksum(hw, &csum);
496 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
500 err = hw->mac.init_hw(hw);
503 * Devices with copper phys will fail to initialise if txgbe_init_hw()
504 * is called too soon after the kernel driver unbinding/binding occurs.
505 * The failure occurs in txgbe_identify_phy() for all devices,
506 * but for non-copper devices, txgbe_identify_sfp_module() is
507 * also called. See txgbe_identify_phy(). The reason for the
508 * failure is not known, and only occurs when virtualisation features
509 * are disabled in the BIOS. A delay of 200ms was found to be enough by
510 * trial-and-error, and is doubled to be safe.
512 if (err && hw->phy.media_type == txgbe_media_type_copper) {
514 err = hw->mac.init_hw(hw);
517 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
520 if (err == TXGBE_ERR_EEPROM_VERSION) {
521 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
522 "LOM. Please be aware there may be issues associated "
523 "with your hardware.");
524 PMD_INIT_LOG(ERR, "If you are experiencing problems "
525 "please contact your hardware representative "
526 "who provided you with this hardware.");
527 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
528 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
531 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
535 /* Reset the hw statistics */
536 txgbe_dev_stats_reset(eth_dev);
538 /* disable interrupt */
539 txgbe_disable_intr(hw);
541 /* Allocate memory for storing MAC addresses */
542 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
543 hw->mac.num_rar_entries, 0);
544 if (eth_dev->data->mac_addrs == NULL) {
546 "Failed to allocate %u bytes needed to store "
548 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
552 /* Copy the permanent MAC address */
553 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
554 &eth_dev->data->mac_addrs[0]);
556 /* Allocate memory for storing hash filter MAC addresses */
557 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
558 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
559 if (eth_dev->data->hash_mac_addrs == NULL) {
561 "Failed to allocate %d bytes needed to store MAC addresses",
562 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
566 /* initialize the vfta */
567 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
569 /* initialize the hw strip bitmap */
570 memset(hwstrip, 0, sizeof(*hwstrip));
572 /* initialize PF if max_vfs not zero */
573 txgbe_pf_host_init(eth_dev);
575 ctrl_ext = rd32(hw, TXGBE_PORTCTL);
576 /* let hardware know driver is loaded */
577 ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
578 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
579 ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
580 wr32(hw, TXGBE_PORTCTL, ctrl_ext);
583 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
584 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
585 (int)hw->mac.type, (int)hw->phy.type,
586 (int)hw->phy.sfp_type);
588 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
589 (int)hw->mac.type, (int)hw->phy.type);
591 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
592 eth_dev->data->port_id, pci_dev->id.vendor_id,
593 pci_dev->id.device_id);
595 rte_intr_callback_register(intr_handle,
596 txgbe_dev_interrupt_handler, eth_dev);
598 /* enable uio/vfio intr/eventfd mapping */
599 rte_intr_enable(intr_handle);
601 /* enable support intr */
602 txgbe_enable_intr(eth_dev);
604 /* initialize bandwidth configuration info */
605 memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
611 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
613 PMD_INIT_FUNC_TRACE();
615 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
618 txgbe_dev_close(eth_dev);
624 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
625 struct rte_pci_device *pci_dev)
627 struct rte_eth_dev *pf_ethdev;
628 struct rte_eth_devargs eth_da;
631 if (pci_dev->device.devargs) {
632 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
637 memset(&eth_da, 0, sizeof(eth_da));
640 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
641 sizeof(struct txgbe_adapter),
642 eth_dev_pci_specific_init, pci_dev,
643 eth_txgbe_dev_init, NULL);
645 if (retval || eth_da.nb_representor_ports < 1)
648 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
649 if (pf_ethdev == NULL)
655 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
657 struct rte_eth_dev *ethdev;
659 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
663 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
666 static struct rte_pci_driver rte_txgbe_pmd = {
667 .id_table = pci_id_txgbe_map,
668 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
669 RTE_PCI_DRV_INTR_LSC,
670 .probe = eth_txgbe_pci_probe,
671 .remove = eth_txgbe_pci_remove,
675 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
677 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
678 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
683 vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
684 vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
685 vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
690 wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
692 /* update local VFTA copy */
693 shadow_vfta->vfta[vid_idx] = vfta;
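/*
 * VFTA lookup example: vlan_id 100 gives vid_idx = (100 >> 5) & 0x7F = 3
 * and vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of VLANTBL[3].
 */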
699 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
701 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
702 struct txgbe_rx_queue *rxq;
704 uint32_t rxcfg, rxbal, rxbah;
707 txgbe_vlan_hw_strip_enable(dev, queue);
709 txgbe_vlan_hw_strip_disable(dev, queue);
711 rxq = dev->data->rx_queues[queue];
712 rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
713 rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
714 rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
715 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
716 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
717 !(rxcfg & TXGBE_RXCFG_VLAN);
718 rxcfg |= TXGBE_RXCFG_VLAN;
720 restart = (rxcfg & TXGBE_RXCFG_ENA) &&
721 (rxcfg & TXGBE_RXCFG_VLAN);
722 rxcfg &= ~TXGBE_RXCFG_VLAN;
724 rxcfg &= ~TXGBE_RXCFG_ENA;
727 /* set vlan strip for ring */
728 txgbe_dev_rx_queue_stop(dev, queue);
729 wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
730 wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
731 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
732 txgbe_dev_rx_queue_start(dev, queue);
737 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
738 enum rte_vlan_type vlan_type,
741 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
743 uint32_t portctrl, vlan_ext, qinq;
745 portctrl = rd32(hw, TXGBE_PORTCTL);
747 vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
748 qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
750 case ETH_VLAN_TYPE_INNER:
752 wr32m(hw, TXGBE_VLANCTL,
753 TXGBE_VLANCTL_TPID_MASK,
754 TXGBE_VLANCTL_TPID(tpid));
755 wr32m(hw, TXGBE_DMATXCTRL,
756 TXGBE_DMATXCTRL_TPID_MASK,
757 TXGBE_DMATXCTRL_TPID(tpid));
760 PMD_DRV_LOG(ERR, "Inner type is not supported"
765 wr32m(hw, TXGBE_TAGTPID(0),
766 TXGBE_TAGTPID_LSB_MASK,
767 TXGBE_TAGTPID_LSB(tpid));
770 case ETH_VLAN_TYPE_OUTER:
772 /* Only the high 16 bits are valid */
773 wr32m(hw, TXGBE_EXTAG,
774 TXGBE_EXTAG_VLAN_MASK,
775 TXGBE_EXTAG_VLAN(tpid));
777 wr32m(hw, TXGBE_VLANCTL,
778 TXGBE_VLANCTL_TPID_MASK,
779 TXGBE_VLANCTL_TPID(tpid));
780 wr32m(hw, TXGBE_DMATXCTRL,
781 TXGBE_DMATXCTRL_TPID_MASK,
782 TXGBE_DMATXCTRL_TPID(tpid));
786 wr32m(hw, TXGBE_TAGTPID(0),
787 TXGBE_TAGTPID_MSB_MASK,
788 TXGBE_TAGTPID_MSB(tpid));
792 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
800 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
802 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
805 PMD_INIT_FUNC_TRACE();
807 /* Filter Table Disable */
808 vlnctrl = rd32(hw, TXGBE_VLANCTL);
809 vlnctrl &= ~TXGBE_VLANCTL_VFE;
810 wr32(hw, TXGBE_VLANCTL, vlnctrl);
814 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
816 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
817 struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
821 PMD_INIT_FUNC_TRACE();
823 /* Filter Table Enable */
824 vlnctrl = rd32(hw, TXGBE_VLANCTL);
825 vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
826 vlnctrl |= TXGBE_VLANCTL_VFE;
827 wr32(hw, TXGBE_VLANCTL, vlnctrl);
829 /* write whatever is in local vfta copy */
830 for (i = 0; i < TXGBE_VFTA_SIZE; i++)
831 wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
835 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
837 struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
838 struct txgbe_rx_queue *rxq;
840 if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
844 TXGBE_SET_HWSTRIP(hwstrip, queue);
846 TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
848 if (queue >= dev->data->nb_rx_queues)
851 rxq = dev->data->rx_queues[queue];
854 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
855 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
857 rxq->vlan_flags = PKT_RX_VLAN;
858 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
863 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
865 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
868 PMD_INIT_FUNC_TRACE();
870 ctrl = rd32(hw, TXGBE_RXCFG(queue));
871 ctrl &= ~TXGBE_RXCFG_VLAN;
872 wr32(hw, TXGBE_RXCFG(queue), ctrl);
874 /* record this setting for per-queue HW strip */
875 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
879 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
881 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
884 PMD_INIT_FUNC_TRACE();
886 ctrl = rd32(hw, TXGBE_RXCFG(queue));
887 ctrl |= TXGBE_RXCFG_VLAN;
888 wr32(hw, TXGBE_RXCFG(queue), ctrl);
890 /* record this setting for per-queue HW strip */
891 txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
895 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
897 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
900 PMD_INIT_FUNC_TRACE();
902 ctrl = rd32(hw, TXGBE_PORTCTL);
903 ctrl &= ~TXGBE_PORTCTL_VLANEXT;
904 ctrl &= ~TXGBE_PORTCTL_QINQ;
905 wr32(hw, TXGBE_PORTCTL, ctrl);
909 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
911 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
912 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
913 struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
916 PMD_INIT_FUNC_TRACE();
918 ctrl = rd32(hw, TXGBE_PORTCTL);
919 ctrl |= TXGBE_PORTCTL_VLANEXT;
920 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
921 txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
922 ctrl |= TXGBE_PORTCTL_QINQ;
923 wr32(hw, TXGBE_PORTCTL, ctrl);
927 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
929 struct txgbe_rx_queue *rxq;
932 PMD_INIT_FUNC_TRACE();
934 for (i = 0; i < dev->data->nb_rx_queues; i++) {
935 rxq = dev->data->rx_queues[i];
937 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
938 txgbe_vlan_strip_queue_set(dev, i, 1);
940 txgbe_vlan_strip_queue_set(dev, i, 0);
945 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
948 struct rte_eth_rxmode *rxmode;
949 struct txgbe_rx_queue *rxq;
951 if (mask & ETH_VLAN_STRIP_MASK) {
952 rxmode = &dev->data->dev_conf.rxmode;
953 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
954 for (i = 0; i < dev->data->nb_rx_queues; i++) {
955 rxq = dev->data->rx_queues[i];
956 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
959 for (i = 0; i < dev->data->nb_rx_queues; i++) {
960 rxq = dev->data->rx_queues[i];
961 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
967 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
969 struct rte_eth_rxmode *rxmode;
970 rxmode = &dev->data->dev_conf.rxmode;
972 if (mask & ETH_VLAN_STRIP_MASK)
973 txgbe_vlan_hw_strip_config(dev);
975 if (mask & ETH_VLAN_FILTER_MASK) {
976 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
977 txgbe_vlan_hw_filter_enable(dev);
979 txgbe_vlan_hw_filter_disable(dev);
982 if (mask & ETH_VLAN_EXTEND_MASK) {
983 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
984 txgbe_vlan_hw_extend_enable(dev);
986 txgbe_vlan_hw_extend_disable(dev);
993 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
995 txgbe_config_vlan_strip_on_all_queues(dev, mask);
997 txgbe_vlan_offload_config(dev, mask);
1003 txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1005 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1006 /* VLANCTL: enable vlan filtering and allow all vlan tags through */
1007 uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
1009 vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
1010 wr32(hw, TXGBE_VLANCTL, vlanctrl);
1014 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1016 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1021 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1024 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1030 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
1031 TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1032 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
1033 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
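/*
 * For example, assuming TXGBE_MAX_RX_QUEUE_NUM is 128: with 64 pools
 * active each pool gets 128 / 64 = 2 queues, and with 16 VFs the PF's
 * default pool starts at queue index 16 * 2 = 32.
 */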
1038 txgbe_check_mq_mode(struct rte_eth_dev *dev)
1040 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1041 uint16_t nb_rx_q = dev->data->nb_rx_queues;
1042 uint16_t nb_tx_q = dev->data->nb_tx_queues;
1044 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1045 /* check multi-queue mode */
1046 switch (dev_conf->rxmode.mq_mode) {
1047 case ETH_MQ_RX_VMDQ_DCB:
1048 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
1050 case ETH_MQ_RX_VMDQ_DCB_RSS:
1051 /* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
1052 PMD_INIT_LOG(ERR, "SRIOV active,"
1053 " unsupported mq_mode rx %d.",
1054 dev_conf->rxmode.mq_mode);
1057 case ETH_MQ_RX_VMDQ_RSS:
1058 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1059 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1060 if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1061 PMD_INIT_LOG(ERR, "SRIOV is active,"
1062 " invalid queue number"
1063 " for VMDQ RSS, allowed"
1064 " values are 1, 2 or 4.");
1068 case ETH_MQ_RX_VMDQ_ONLY:
1069 case ETH_MQ_RX_NONE:
1070 /* if no mq mode is configured, use the default scheme */
1071 dev->data->dev_conf.rxmode.mq_mode =
1072 ETH_MQ_RX_VMDQ_ONLY;
1074 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1075 /* SRIOV only works with VMDq enabled */
1076 PMD_INIT_LOG(ERR, "SRIOV is active,"
1077 " wrong mq_mode rx %d.",
1078 dev_conf->rxmode.mq_mode);
1082 switch (dev_conf->txmode.mq_mode) {
1083 case ETH_MQ_TX_VMDQ_DCB:
1084 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1085 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1087 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1088 dev->data->dev_conf.txmode.mq_mode =
1089 ETH_MQ_TX_VMDQ_ONLY;
1093 /* check valid queue number */
1094 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1095 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1096 PMD_INIT_LOG(ERR, "SRIOV is active,"
1097 " nb_rx_q=%d nb_tx_q=%d queue number"
1098 " must be less than or equal to %d.",
1100 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1104 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1105 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1109 /* check configuration for vmdq+dcb mode */
1110 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1111 const struct rte_eth_vmdq_dcb_conf *conf;
1113 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1114 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1115 TXGBE_VMDQ_DCB_NB_QUEUES);
1118 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1119 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1120 conf->nb_queue_pools == ETH_32_POOLS)) {
1121 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1122 " nb_queue_pools must be %d or %d.",
1123 ETH_16_POOLS, ETH_32_POOLS);
1127 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1128 const struct rte_eth_vmdq_dcb_tx_conf *conf;
1130 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1131 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1132 TXGBE_VMDQ_DCB_NB_QUEUES);
1135 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1136 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1137 conf->nb_queue_pools == ETH_32_POOLS)) {
1138 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1139 " nb_queue_pools != %d and"
1140 " nb_queue_pools != %d.",
1141 ETH_16_POOLS, ETH_32_POOLS);
1146 /* For DCB mode check our configuration before we go further */
1147 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1148 const struct rte_eth_dcb_rx_conf *conf;
1150 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1151 if (!(conf->nb_tcs == ETH_4_TCS ||
1152 conf->nb_tcs == ETH_8_TCS)) {
1153 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1154 " and nb_tcs != %d.",
1155 ETH_4_TCS, ETH_8_TCS);
1160 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1161 const struct rte_eth_dcb_tx_conf *conf;
1163 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1164 if (!(conf->nb_tcs == ETH_4_TCS ||
1165 conf->nb_tcs == ETH_8_TCS)) {
1166 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1167 " and nb_tcs != %d.",
1168 ETH_4_TCS, ETH_8_TCS);
1177 txgbe_dev_configure(struct rte_eth_dev *dev)
1179 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1180 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1183 PMD_INIT_FUNC_TRACE();
1185 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1186 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1188 /* multiple queue mode checking */
1189 ret = txgbe_check_mq_mode(dev);
1191 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1196 /* set flag to update link status after init */
1197 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1200 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
1201 * allocation preconditions, it will be reset.
1203 adapter->rx_bulk_alloc_allowed = true;
1209 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1211 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1212 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1215 gpie = rd32(hw, TXGBE_GPIOINTEN);
1216 gpie |= TXGBE_GPIOBIT_6;
1217 wr32(hw, TXGBE_GPIOINTEN, gpie);
1218 intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1222 txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1223 uint16_t tx_rate, uint64_t q_msk)
1225 struct txgbe_hw *hw;
1226 struct txgbe_vf_info *vfinfo;
1227 struct rte_eth_link link;
1228 uint8_t nb_q_per_pool;
1229 uint32_t queue_stride;
1230 uint32_t queue_idx, idx = 0, vf_idx;
1232 uint16_t total_rate = 0;
1233 struct rte_pci_device *pci_dev;
1236 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1237 ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1241 if (vf >= pci_dev->max_vfs)
1244 if (tx_rate > link.link_speed)
1250 hw = TXGBE_DEV_HW(dev);
1251 vfinfo = *(TXGBE_DEV_VFDATA(dev));
1252 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1253 queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1254 queue_idx = vf * queue_stride;
1255 queue_end = queue_idx + nb_q_per_pool - 1;
1256 if (queue_end >= hw->mac.max_tx_queues)
1260 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1263 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1265 total_rate += vfinfo[vf_idx].tx_rate[idx];
1271 /* Store tx_rate for this vf. */
1272 for (idx = 0; idx < nb_q_per_pool; idx++) {
1273 if (((uint64_t)0x1 << idx) & q_msk) {
1274 if (vfinfo[vf].tx_rate[idx] != tx_rate)
1275 vfinfo[vf].tx_rate[idx] = tx_rate;
1276 total_rate += tx_rate;
1280 if (total_rate > dev->data->dev_link.link_speed) {
1281 /* Reset stored TX rate of the VF if it would exceed
1284 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1288 /* Set ARBTXRATE of each queue/pool for vf X */
1289 for (; queue_idx <= queue_end; queue_idx++) {
1291 txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
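/*
 * Example: with 64 active pools and assuming TXGBE_MAX_RX_QUEUE_NUM is
 * 128, queue_stride = 128 / 64 = 2, so with nb_q_per_pool = 2 VF 3 owns
 * queues 6..7, and each of its queues selected by q_msk is limited to
 * tx_rate.
 */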
1299 * Configure device link speed and setup link.
1300 * It returns 0 on success.
1303 txgbe_dev_start(struct rte_eth_dev *dev)
1305 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1306 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1307 struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1308 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1309 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1310 uint32_t intr_vector = 0;
1312 bool link_up = false, negotiate = 0;
1314 uint32_t allowed_speeds = 0;
1318 uint32_t *link_speeds;
1320 PMD_INIT_FUNC_TRACE();
1322 /* TXGBE devices don't support:
1323 * - half duplex (checked afterwards for valid speeds)
1324 * - fixed speed: TODO implement
1326 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1328 "Invalid link_speeds for port %u, fixed speed not supported",
1329 dev->data->port_id);
1333 /* Stop the link setup handler before resetting the HW. */
1334 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1336 /* disable uio/vfio intr/eventfd mapping */
1337 rte_intr_disable(intr_handle);
1340 hw->adapter_stopped = 0;
1343 /* reinitialize adapter
1344 * this calls reset and start
1346 hw->nb_rx_queues = dev->data->nb_rx_queues;
1347 hw->nb_tx_queues = dev->data->nb_tx_queues;
1348 status = txgbe_pf_reset_hw(hw);
1351 hw->mac.start_hw(hw);
1352 hw->mac.get_link_status = true;
1354 /* configure PF module if SRIOV enabled */
1355 txgbe_pf_host_configure(dev);
1357 txgbe_dev_phy_intr_setup(dev);
1359 /* check and configure queue intr-vector mapping */
1360 if ((rte_intr_cap_multiple(intr_handle) ||
1361 !RTE_ETH_DEV_SRIOV(dev).active) &&
1362 dev->data->dev_conf.intr_conf.rxq != 0) {
1363 intr_vector = dev->data->nb_rx_queues;
1364 if (rte_intr_efd_enable(intr_handle, intr_vector))
1368 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1369 intr_handle->intr_vec =
1370 rte_zmalloc("intr_vec",
1371 dev->data->nb_rx_queues * sizeof(int), 0);
1372 if (intr_handle->intr_vec == NULL) {
1373 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1374 " intr_vec", dev->data->nb_rx_queues);
1379 /* configure msix to sleep until rx interrupt */
1380 txgbe_configure_msix(dev);
1382 /* initialize transmission unit */
1383 txgbe_dev_tx_init(dev);
1385 /* This can fail when allocating mbufs for descriptor rings */
1386 err = txgbe_dev_rx_init(dev);
1388 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1392 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1393 ETH_VLAN_EXTEND_MASK;
1394 err = txgbe_vlan_offload_config(dev, mask);
1396 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1400 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1401 /* Enable vlan filtering for VMDq */
1402 txgbe_vmdq_vlan_hw_filter_enable(dev);
1405 /* Configure DCB hw */
1406 txgbe_configure_pb(dev);
1407 txgbe_configure_port(dev);
1408 txgbe_configure_dcb(dev);
1410 /* Restore vf rate limit */
1411 if (vfinfo != NULL) {
1412 for (vf = 0; vf < pci_dev->max_vfs; vf++)
1413 for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1414 if (vfinfo[vf].tx_rate[idx] != 0)
1415 txgbe_set_vf_rate_limit(dev, vf,
1416 vfinfo[vf].tx_rate[idx],
1420 err = txgbe_dev_rxtx_start(dev);
1422 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1426 /* Skip link setup if loopback mode is enabled. */
1427 if (hw->mac.type == txgbe_mac_raptor &&
1428 dev->data->dev_conf.lpbk_mode)
1429 goto skip_link_setup;
1431 if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1432 err = hw->mac.setup_sfp(hw);
1437 if (hw->phy.media_type == txgbe_media_type_copper) {
1438 /* Turn on the copper */
1439 hw->phy.set_phy_power(hw, true);
1441 /* Turn on the laser */
1442 hw->mac.enable_tx_laser(hw);
1445 err = hw->mac.check_link(hw, &speed, &link_up, 0);
1448 dev->data->dev_link.link_status = link_up;
1450 err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1454 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1457 link_speeds = &dev->data->dev_conf.link_speeds;
1458 if (*link_speeds & ~allowed_speeds) {
1459 PMD_INIT_LOG(ERR, "Invalid link setting");
1464 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1465 speed = (TXGBE_LINK_SPEED_100M_FULL |
1466 TXGBE_LINK_SPEED_1GB_FULL |
1467 TXGBE_LINK_SPEED_10GB_FULL);
1469 if (*link_speeds & ETH_LINK_SPEED_10G)
1470 speed |= TXGBE_LINK_SPEED_10GB_FULL;
1471 if (*link_speeds & ETH_LINK_SPEED_5G)
1472 speed |= TXGBE_LINK_SPEED_5GB_FULL;
1473 if (*link_speeds & ETH_LINK_SPEED_2_5G)
1474 speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1475 if (*link_speeds & ETH_LINK_SPEED_1G)
1476 speed |= TXGBE_LINK_SPEED_1GB_FULL;
1477 if (*link_speeds & ETH_LINK_SPEED_100M)
1478 speed |= TXGBE_LINK_SPEED_100M_FULL;
1481 err = hw->mac.setup_link(hw, speed, link_up);
1487 if (rte_intr_allow_others(intr_handle)) {
1488 /* check if lsc interrupt is enabled */
1489 if (dev->data->dev_conf.intr_conf.lsc != 0)
1490 txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1492 txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1493 txgbe_dev_macsec_interrupt_setup(dev);
1494 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1496 rte_intr_callback_unregister(intr_handle,
1497 txgbe_dev_interrupt_handler, dev);
1498 if (dev->data->dev_conf.intr_conf.lsc != 0)
1499 PMD_INIT_LOG(INFO, "lsc won't be enabled because of"
1500 " no intr multiplex");
1503 /* check if rxq interrupt is enabled */
1504 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1505 rte_intr_dp_is_en(intr_handle))
1506 txgbe_dev_rxq_interrupt_setup(dev);
1508 /* enable uio/vfio intr/eventfd mapping */
1509 rte_intr_enable(intr_handle);
1511 /* resume enabled intr since hw reset */
1512 txgbe_enable_intr(dev);
1515 * Update link status right before return, because it may
1516 * start link configuration process in a separate thread.
1518 txgbe_dev_link_update(dev, 0);
1520 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1522 txgbe_read_stats_registers(hw, hw_stats);
1523 hw->offset_loaded = 1;
1528 PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1529 txgbe_dev_clear_queues(dev);
1534 * Stop device: disable rx and tx functions to allow for reconfiguring.
1537 txgbe_dev_stop(struct rte_eth_dev *dev)
1539 struct rte_eth_link link;
1540 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1541 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1542 struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1543 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1544 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1547 if (hw->adapter_stopped)
1550 PMD_INIT_FUNC_TRACE();
1552 rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1554 /* disable interrupts */
1555 txgbe_disable_intr(hw);
1558 txgbe_pf_reset_hw(hw);
1559 hw->adapter_stopped = 0;
1564 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1565 vfinfo[vf].clear_to_send = false;
1567 if (hw->phy.media_type == txgbe_media_type_copper) {
1568 /* Turn off the copper */
1569 hw->phy.set_phy_power(hw, false);
1571 /* Turn off the laser */
1572 hw->mac.disable_tx_laser(hw);
1575 txgbe_dev_clear_queues(dev);
1577 /* Clear stored conf */
1578 dev->data->scattered_rx = 0;
1581 /* Clear recorded link status */
1582 memset(&link, 0, sizeof(link));
1583 rte_eth_linkstatus_set(dev, &link);
1585 if (!rte_intr_allow_others(intr_handle))
1586 /* resume to the default handler */
1587 rte_intr_callback_register(intr_handle,
1588 txgbe_dev_interrupt_handler,
1591 /* Clean datapath event and queue/vec mapping */
1592 rte_intr_efd_disable(intr_handle);
1593 if (intr_handle->intr_vec != NULL) {
1594 rte_free(intr_handle->intr_vec);
1595 intr_handle->intr_vec = NULL;
1598 adapter->rss_reta_updated = 0;
1599 wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1601 hw->adapter_stopped = true;
1602 dev->data->dev_started = 0;
1608 * Set device link up: enable tx.
1611 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1613 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1615 if (hw->phy.media_type == txgbe_media_type_copper) {
1616 /* Turn on the copper */
1617 hw->phy.set_phy_power(hw, true);
1619 /* Turn on the laser */
1620 hw->mac.enable_tx_laser(hw);
1621 txgbe_dev_link_update(dev, 0);
1628 * Set device link down: disable tx.
1631 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1633 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1635 if (hw->phy.media_type == txgbe_media_type_copper) {
1636 /* Turn off the copper */
1637 hw->phy.set_phy_power(hw, false);
1639 /* Turn off the laser */
1640 hw->mac.disable_tx_laser(hw);
1641 txgbe_dev_link_update(dev, 0);
1648 * Reset and stop device.
1651 txgbe_dev_close(struct rte_eth_dev *dev)
1653 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1654 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1655 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1659 PMD_INIT_FUNC_TRACE();
1661 txgbe_pf_reset_hw(hw);
1663 ret = txgbe_dev_stop(dev);
1665 txgbe_dev_free_queues(dev);
1667 /* reprogram the RAR[0] in case user changed it. */
1668 txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1670 /* Unlock any pending hardware semaphore */
1671 txgbe_swfw_lock_reset(hw);
1673 /* disable uio intr before callback unregister */
1674 rte_intr_disable(intr_handle);
1677 ret = rte_intr_callback_unregister(intr_handle,
1678 txgbe_dev_interrupt_handler, dev);
1679 if (ret >= 0 || ret == -ENOENT) {
1681 } else if (ret != -EAGAIN) {
1683 "intr callback unregister failed: %d",
1687 } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1689 /* cancel the delayed handler before removing the dev */
1690 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1692 /* uninitialize PF if max_vfs not zero */
1693 txgbe_pf_host_uninit(dev);
1695 rte_free(dev->data->mac_addrs);
1696 dev->data->mac_addrs = NULL;
1698 rte_free(dev->data->hash_mac_addrs);
1699 dev->data->hash_mac_addrs = NULL;
1708 txgbe_dev_reset(struct rte_eth_dev *dev)
1712 /* When a DPDK PMD PF begins to reset the PF port, it should notify
1713 * all its VFs to make them align with it. The detailed notification
1714 * mechanism is PMD specific. As for the txgbe PF, it is rather complex.
1715 * To avoid unexpected behavior in VFs, resetting the PF with SR-IOV
1716 * activated is currently not supported. It might be supported later.
1718 if (dev->data->sriov.active)
1721 ret = eth_txgbe_dev_uninit(dev);
1725 ret = eth_txgbe_dev_init(dev, NULL);
1730 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
1732 uint32_t current_counter = rd32(hw, reg); \
1733 if (current_counter < last_counter) \
1734 current_counter += 0x100000000LL; \
1735 if (!hw->offset_loaded) \
1736 last_counter = current_counter; \
1737 counter = current_counter - last_counter; \
1738 counter &= 0xFFFFFFFFLL; \
1741 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1743 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
1744 uint64_t current_counter_msb = rd32(hw, reg_msb); \
1745 uint64_t current_counter = (current_counter_msb << 32) | \
1746 current_counter_lsb; \
1747 if (current_counter < last_counter) \
1748 current_counter += 0x1000000000LL; \
1749 if (!hw->offset_loaded) \
1750 last_counter = current_counter; \
1751 counter = current_counter - last_counter; \
1752 counter &= 0xFFFFFFFFFLL; \
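/*
 * Both macros tolerate one hardware counter wrap between reads: if the
 * current value is below the last snapshot, a 32-bit counter gets 2^32
 * (0x100000000) added and a 36-bit counter gets 2^36 (0x1000000000)
 * added before the delta is taken. For example, last = 0xFFFFFFF0 and
 * current = 0x10 on a 32-bit counter yields a delta of 0x20.
 */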
1756 txgbe_read_stats_registers(struct txgbe_hw *hw,
1757 struct txgbe_hw_stats *hw_stats)
1762 for (i = 0; i < hw->nb_rx_queues; i++) {
1763 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
1764 hw->qp_last[i].rx_qp_packets,
1765 hw_stats->qp[i].rx_qp_packets);
1766 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
1767 hw->qp_last[i].rx_qp_bytes,
1768 hw_stats->qp[i].rx_qp_bytes);
1769 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
1770 hw->qp_last[i].rx_qp_mc_packets,
1771 hw_stats->qp[i].rx_qp_mc_packets);
1774 for (i = 0; i < hw->nb_tx_queues; i++) {
1775 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
1776 hw->qp_last[i].tx_qp_packets,
1777 hw_stats->qp[i].tx_qp_packets);
1778 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
1779 hw->qp_last[i].tx_qp_bytes,
1780 hw_stats->qp[i].tx_qp_bytes);
1783 for (i = 0; i < TXGBE_MAX_UP; i++) {
1784 hw_stats->up[i].rx_up_xon_packets +=
1785 rd32(hw, TXGBE_PBRXUPXON(i));
1786 hw_stats->up[i].rx_up_xoff_packets +=
1787 rd32(hw, TXGBE_PBRXUPXOFF(i));
1788 hw_stats->up[i].tx_up_xon_packets +=
1789 rd32(hw, TXGBE_PBTXUPXON(i));
1790 hw_stats->up[i].tx_up_xoff_packets +=
1791 rd32(hw, TXGBE_PBTXUPXOFF(i));
1792 hw_stats->up[i].tx_up_xon2off_packets +=
1793 rd32(hw, TXGBE_PBTXUPOFF(i));
1794 hw_stats->up[i].rx_up_dropped +=
1795 rd32(hw, TXGBE_PBRXMISS(i));
1797 hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
1798 hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
1799 hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
1800 hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
1803 hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
1804 hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
1806 hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
1807 hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
1808 hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
1811 hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
1812 hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
1813 hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
1815 hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
1816 hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
1817 hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
1819 hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
1820 hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
1822 hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
1823 hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
1824 hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
1825 hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
1826 hw_stats->rx_size_512_to_1023_packets +=
1827 rd64(hw, TXGBE_MACRX512TO1023L);
1828 hw_stats->rx_size_1024_to_max_packets +=
1829 rd64(hw, TXGBE_MACRX1024TOMAXL);
1830 hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
1831 hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
1832 hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
1833 hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
1834 hw_stats->tx_size_512_to_1023_packets +=
1835 rd64(hw, TXGBE_MACTX512TO1023L);
1836 hw_stats->tx_size_1024_to_max_packets +=
1837 rd64(hw, TXGBE_MACTX1024TOMAXL);
1839 hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
1840 hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
1841 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
1844 hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
1845 hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
1846 hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
1847 hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
1850 hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
1851 hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
1852 hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
1853 hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
1854 hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
1855 hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
1856 hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
1858 /* Flow Director Stats */
1859 hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
1860 hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
1861 hw_stats->flow_director_added_filters +=
1862 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
1863 hw_stats->flow_director_removed_filters +=
1864 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
1865 hw_stats->flow_director_filter_add_errors +=
1866 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
1867 hw_stats->flow_director_filter_remove_errors +=
1868 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
1871 hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
1872 hw_stats->tx_macsec_pkts_encrypted +=
1873 rd32(hw, TXGBE_LSECTX_ENCPKT);
1874 hw_stats->tx_macsec_pkts_protected +=
1875 rd32(hw, TXGBE_LSECTX_PROTPKT);
1876 hw_stats->tx_macsec_octets_encrypted +=
1877 rd32(hw, TXGBE_LSECTX_ENCOCT);
1878 hw_stats->tx_macsec_octets_protected +=
1879 rd32(hw, TXGBE_LSECTX_PROTOCT);
1880 hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
1881 hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
1882 hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
1883 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
1884 hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
1885 hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
1886 hw_stats->rx_macsec_sc_pkts_unchecked +=
1887 rd32(hw, TXGBE_LSECRX_UNCHKPKT);
1888 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
1889 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
1890 for (i = 0; i < 2; i++) {
1891 hw_stats->rx_macsec_sa_pkts_ok +=
1892 rd32(hw, TXGBE_LSECRX_OKPKT(i));
1893 hw_stats->rx_macsec_sa_pkts_invalid +=
1894 rd32(hw, TXGBE_LSECRX_INVPKT(i));
1895 hw_stats->rx_macsec_sa_pkts_notvalid +=
1896 rd32(hw, TXGBE_LSECRX_BADPKT(i));
1898 hw_stats->rx_macsec_sa_pkts_unusedsa +=
1899 rd32(hw, TXGBE_LSECRX_INVSAPKT);
1900 hw_stats->rx_macsec_sa_pkts_notusingsa +=
1901 rd32(hw, TXGBE_LSECRX_BADSAPKT);
1903 hw_stats->rx_total_missed_packets = 0;
1904 for (i = 0; i < TXGBE_MAX_UP; i++) {
1905 hw_stats->rx_total_missed_packets +=
1906 hw_stats->up[i].rx_up_dropped;
1911 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1913 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1914 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1915 struct txgbe_stat_mappings *stat_mappings =
1916 TXGBE_DEV_STAT_MAPPINGS(dev);
1919 txgbe_read_stats_registers(hw, hw_stats);
1924 /* Fill out the rte_eth_stats statistics structure */
1925 stats->ipackets = hw_stats->rx_packets;
1926 stats->ibytes = hw_stats->rx_bytes;
1927 stats->opackets = hw_stats->tx_packets;
1928 stats->obytes = hw_stats->tx_bytes;
1930 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1931 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1932 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1933 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1934 memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1935 for (i = 0; i < TXGBE_MAX_QP; i++) {
1936 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1937 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1940 q_map = (stat_mappings->rqsm[n] >> offset)
1941 & QMAP_FIELD_RESERVED_BITS_MASK;
1942 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1943 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1944 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1945 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1947 q_map = (stat_mappings->tqsm[n] >> offset)
1948 & QMAP_FIELD_RESERVED_BITS_MASK;
1949 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1950 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1951 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1952 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
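/*
 * The QSM mapping folds hardware queue i into software counter j; if the
 * mapped index reaches RTE_ETHDEV_QUEUE_STAT_CNTRS (16 by default) it
 * wraps modulo that limit, so several queues may accumulate into the
 * same q_ipackets/q_opackets slot.
 */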
1956 stats->imissed = hw_stats->rx_total_missed_packets;
1957 stats->ierrors = hw_stats->rx_crc_errors +
1958 hw_stats->rx_mac_short_packet_dropped +
1959 hw_stats->rx_length_errors +
1960 hw_stats->rx_undersize_errors +
1961 hw_stats->rx_oversize_errors +
1962 hw_stats->rx_drop_packets +
1963 hw_stats->rx_illegal_byte_errors +
1964 hw_stats->rx_error_bytes +
1965 hw_stats->rx_fragment_errors +
1966 hw_stats->rx_fcoe_crc_errors +
1967 hw_stats->rx_fcoe_mbuf_allocation_errors;
1975 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
1977 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1978 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1980 /* HW registers are cleared on read */
1981 hw->offset_loaded = 0;
1982 txgbe_dev_stats_get(dev, NULL);
1983 hw->offset_loaded = 1;
1985 /* Reset software totals */
1986 memset(hw_stats, 0, sizeof(*hw_stats));
1991 /* This function calculates the number of xstats based on the current config */
1993 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
1995 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1996 return TXGBE_NB_HW_STATS +
1997 TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
1998 TXGBE_NB_QP_STATS * nb_queues;
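/*
 * For example, a port configured with 4 Rx and 2 Tx queues reports
 * TXGBE_NB_HW_STATS + TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
 * TXGBE_NB_QP_STATS * 4 entries, since max(4, 2) = 4 queue pairs are
 * counted.
 */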
2002 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2006 /* Extended stats from txgbe_hw_stats */
2007 if (id < TXGBE_NB_HW_STATS) {
2008 snprintf(name, size, "[hw]%s",
2009 rte_txgbe_stats_strings[id].name);
2012 id -= TXGBE_NB_HW_STATS;
2014 /* Priority Stats */
2015 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2016 nb = id / TXGBE_NB_UP_STATS;
2017 st = id % TXGBE_NB_UP_STATS;
2018 snprintf(name, size, "[p%u]%s", nb,
2019 rte_txgbe_up_strings[st].name);
2022 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2025 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2026 nb = id / TXGBE_NB_QP_STATS;
2027 st = id % TXGBE_NB_QP_STATS;
2028 snprintf(name, size, "[q%u]%s", nb,
2029 rte_txgbe_qp_strings[st].name);
2032 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2034 return -(int)(id + 1);
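/*
 * The flat xstats id space is laid out as: hardware stats first
 * ([hw]...), then TXGBE_NB_UP_STATS entries per user priority
 * ([p0], [p1], ...), then TXGBE_NB_QP_STATS entries per queue pair
 * ([q0], [q1], ...). txgbe_get_offset_by_id() below decodes ids the
 * same way into byte offsets within struct txgbe_hw_stats.
 */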
2038 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2042 /* Extended stats from txgbe_hw_stats */
2043 if (id < TXGBE_NB_HW_STATS) {
2044 *offset = rte_txgbe_stats_strings[id].offset;
2047 id -= TXGBE_NB_HW_STATS;
2049 /* Priority Stats */
2050 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2051 nb = id / TXGBE_NB_UP_STATS;
2052 st = id % TXGBE_NB_UP_STATS;
2053 *offset = rte_txgbe_up_strings[st].offset +
2054 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2057 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2060 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2061 nb = id / TXGBE_NB_QP_STATS;
2062 st = id % TXGBE_NB_QP_STATS;
2063 *offset = rte_txgbe_qp_strings[st].offset +
2064 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2067 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2069 return -(int)(id + 1);
2072 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2073 struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2075 unsigned int i, count;
2077 count = txgbe_xstats_calc_num(dev);
2078 if (xstats_names == NULL)
2081 /* Note: limit >= cnt_stats checked upstream
2082 * in rte_eth_xstats_names()
2084 limit = min(limit, count);
2086 /* Extended stats from txgbe_hw_stats */
2087 for (i = 0; i < limit; i++) {
2088 if (txgbe_get_name_by_id(i, xstats_names[i].name,
2089 sizeof(xstats_names[i].name))) {
2090 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2098 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2099 struct rte_eth_xstat_name *xstats_names,
2100 const uint64_t *ids,
2106 return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2108 for (i = 0; i < limit; i++) {
2109 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2110 sizeof(xstats_names[i].name))) {
2111 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2120 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2123 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2124 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2125 unsigned int i, count;
2127 txgbe_read_stats_registers(hw, hw_stats);
2129 /* If this is a reset, xstats is NULL and we have already cleared
2130 * the registers by reading them.
2132 count = txgbe_xstats_calc_num(dev);
2136 limit = min(limit, txgbe_xstats_calc_num(dev));
2138 /* Extended stats from txgbe_hw_stats */
2139 for (i = 0; i < limit; i++) {
2140 uint32_t offset = 0;
2142 if (txgbe_get_offset_by_id(i, &offset)) {
2143 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2146 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2154 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2157 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2158 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2159 unsigned int i, count;
2161 txgbe_read_stats_registers(hw, hw_stats);
2163 /* If this is a reset xstats is NULL, and we have cleared the
2164 * registers by reading them.
2166 count = txgbe_xstats_calc_num(dev);
2170 limit = min(limit, txgbe_xstats_calc_num(dev));
2172 /* Extended stats from txgbe_hw_stats */
2173 for (i = 0; i < limit; i++) {
2176 if (txgbe_get_offset_by_id(i, &offset)) {
2177 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2180 values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
static int
txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int limit)
{
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return txgbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (txgbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}
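/*
 * A minimal application-side sketch of reading two counters through the
 * generic ethdev API (port_id and the id values are illustrative):
 *
 *   uint64_t ids[2] = { 0, 5 };
 *   uint64_t values[2];
 *
 *   if (rte_eth_xstats_get_by_id(port_id, ids, values, 2) == 2)
 *       printf("stat0=%" PRIu64 " stat5=%" PRIu64 "\n",
 *              values[0], values[1]);
 */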
static int
txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}
static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872; /* maximum frame size the MAC supports */
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}
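/*
 * Applications can honor these preferred parameters when sizing rings,
 * e.g. (sketch; port_id and the fallback value are illustrative):
 *
 *   struct rte_eth_dev_info info;
 *
 *   if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *       info.default_rxportconf.ring_size != 0)
 *       nb_rxd = info.default_rxportconf.ring_size;
 */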
static const uint32_t *
txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
		return txgbe_get_supported_ptypes();

	return NULL;
}

void
txgbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		hw->mac.get_link_capabilities(hw, &speed, &autoneg);

	hw->mac.setup_link(hw, speed, true);

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}
/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_link link;
	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	bool link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	hw->mac.get_link_status = true;

	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);

	if (err != 0) {
		link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		if (hw->phy.media_type == txgbe_media_type_fiber) {
			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				txgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbe_dev_link_status_print(dev);
	if (on)
		intr->mask_misc |= TXGBE_ICRMISC_LSC;
	else
		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

	return 0;
}
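/*
 * With the LSC interrupt enabled, an application typically registers a
 * callback for link-state events; a minimal sketch (port_id illustrative,
 * and dev_conf.intr_conf.lsc must be set at configure time):
 *
 *   static int
 *   lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                void *cb_arg, void *ret_param)
 *   {
 *       RTE_SET_USED(cb_arg);
 *       RTE_SET_USED(ret_param);
 *       printf("port %u: event %d\n", port_id, event);
 *       return 0;
 *   }
 *
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 lsc_event_cb, NULL);
 */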
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask[0] |= TXGBE_ICR_MASK;
	intr->mask[1] |= TXGBE_ICR_MASK;

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

	return 0;
}

/**
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}
/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			(int)(dev->data->port_id),
			(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		pci_dev->addr.domain,
		pci_dev->addr.bus,
		pci_dev->addr.devid,
		pci_dev->addr.function);
}
/**
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX) {
		txgbe_pf_mbx_process(dev);
		intr->flags &= ~TXGBE_FLAG_MAILBOX;
	}

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* likely to up */
		if (!link.link_status)
			/* handle it 1 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait for it to become stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}

/**
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, waiting for the NIC state to
 * become stable. As the interrupt state is not stable for txgbe right
 * after the link goes down, it needs to wait 4 seconds to get the stable
 * status.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t eicr;

	txgbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	if (eicr & TXGBE_ICRMISC_VFMBX)
		txgbe_pf_mbx_process(dev);

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		txgbe_dev_link_update(dev, 0);
		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
		txgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & TXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~TXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}

/**
 * Interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbe_dev_interrupt_get_status(dev);
	txgbe_dev_interrupt_action(dev, dev->intr_handle);
}
static int
txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct txgbe_hw *hw;
	uint32_t mflcn_reg;
	uint32_t fccfg_reg;
	int rx_pause;
	int tx_pause;

	hw = TXGBE_DEV_HW(dev);

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water[0];
	fc_conf->low_water = hw->fc.low_water[0];
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;

	/*
	 * Return rx_pause status according to actual setting of
	 * the RXFCCFG register.
	 */
	mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
	if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
		rx_pause = 1;
	else
		rx_pause = 0;

	/*
	 * Return tx_pause status according to actual setting of
	 * the TXFCCFG register.
	 */
	fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
	if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
		tx_pause = 1;
	else
		tx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}
static int
txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct txgbe_hw *hw;
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
		txgbe_fc_none,
		txgbe_fc_rx_pause,
		txgbe_fc_tx_pause,
		txgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	hw = TXGBE_DEV_HW(dev);
	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/*
	 * At least reserve one Ethernet frame for watermark;
	 * high_water/low_water are in kilobytes for txgbe.
	 */
	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
	if (fc_conf->high_water > max_high_water ||
	    fc_conf->high_water < fc_conf->low_water) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water[0] = fc_conf->high_water;
	hw->fc.low_water[0] = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;
	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;

	err = txgbe_fc_enable(hw);

	/* Not negotiated is not an error case */
	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
		wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
		      (fc_conf->mac_ctrl_frame_fwd
		       ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
		txgbe_flush(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
	return -EIO;
}
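/*
 * For illustration: with a 512 KB Rx packet buffer (rx_buf_size = 0x80000)
 * and RTE_ETHER_MAX_LEN = 1518, the check above allows
 *   max_high_water = (0x80000 - 1518) >> 10 = 510 KB,
 * i.e. the high watermark must leave room for at least one full frame.
 */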
static int
txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf)
{
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint8_t tc_num;
	uint8_t map[TXGBE_DCB_UP_MAX] = { 0 };
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);

	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
		txgbe_fc_none,
		txgbe_fc_rx_pause,
		txgbe_fc_tx_pause,
		txgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
	tc_num = map[pfc_conf->priority];
	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
	/*
	 * At least reserve one Ethernet frame for watermark;
	 * high_water/low_water are in kilobytes for txgbe.
	 */
	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
	if (pfc_conf->fc.high_water > max_high_water ||
	    pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
	hw->fc.pause_time = pfc_conf->fc.pause_time;
	hw->fc.send_xon = pfc_conf->fc.send_xon;
	hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;

	err = txgbe_dcb_pfc_enable(hw, tc_num);

	/* Not negotiated is not an error case */
	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
		return 0;

	PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
	return -EIO;
}
static int
txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	if (!txgbe_rss_update_sp(hw->mac.type)) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number the hardware can "
			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1)) {
				reta &= ~(MS32(8 * j, 0xFF));
				reta |= LS32(reta_conf[idx].reta[shift + j],
						8 * j, 0xFF);
			}
		}
		wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
	}
	adapter->rss_reta_updated = 1;

	return 0;
}
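/*
 * Application-side sketch of spreading the 128 RETA entries across the Rx
 * queues (port_id/nb_rx_queues illustrative; error handling omitted):
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[2];
 *   uint16_t i;
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < 128; i++) {
 *       reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *           1ULL << (i % RTE_RETA_GROUP_SIZE);
 *       reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *           i % nb_rx_queues;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */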
static int
txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number the hardware can "
			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1))
				reta_conf[idx].reta[shift + j] =
					(uint16_t)RS32(reta, 8 * j, 0xFF);
		}
	}

	return 0;
}

static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t enable_addr = 1;

	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	txgbe_remove_rar(dev, 0);
	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}
static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	vector = txgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
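/*
 * Worked example (illustrative): for a unicast address whose last two
 * bytes are 0xAB and 0xCD, filter type 0 gives
 *   vector   = (0xAB >> 4) | (0xCD << 4) = 0xCDA
 *   uta_idx  = (0xCDA >> 5) & 0x7F       = 0x66
 *   uta_mask = 1 << (0xCDA & 0x1F)       = 1 << 26
 * so bit 26 of UCADDRTBL(0x66) controls that hash bucket.
 */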
static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
	uint32_t psrctl;
	int i;

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			wr32(hw, TXGBE_UCADDRTBL(i), 0);
		}
	}

	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (on)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}

static uint32_t
txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
{
	uint32_t new_val = orig_val;

	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
		new_val |= TXGBE_POOLETHCTL_UTA;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
		new_val |= TXGBE_POOLETHCTL_MCHA;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
		new_val |= TXGBE_POOLETHCTL_UCHA;
	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
		new_val |= TXGBE_POOLETHCTL_BCA;
	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
		new_val |= TXGBE_POOLETHCTL_MCP;

	return new_val;
}
static int
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		/* OR in the queue bit so other queues stay enabled */
		mask |= (1U << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask |= (1U << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}
	rte_intr_enable(intr_handle);

	return 0;
}

static int
txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= ~(1U << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= ~(1U << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}

	return 0;
}
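/*
 * Typical application flow for Rx interrupt mode (sketch; error handling
 * omitted, port_id/queue_id/BURST illustrative): arm the queue interrupt,
 * sleep until the event fd fires, then disarm and poll the queue.
 *
 *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *   // wait on the interrupt event fd, e.g. via rte_epoll_wait()
 *   rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *   nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST);
 */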
/**
 * Set the IVAR registers, mapping interrupt causes to vectors.
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}
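/*
 * Layout illustration: each 32-bit IVAR register holds four 8-bit entries,
 * one per (queue parity, direction) pair. E.g. mapping Rx queue 5 gives
 *   idx = 16 * (5 & 1) + 8 * 0 = 16
 * so the vector lands in bits [23:16] of TXGBE_IVAR(5 >> 1) = TXGBE_IVAR(2).
 */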
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd,
	 * but if msix has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
		TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
		| TXGBE_ITR_WRDSA);
}
int
txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
			   uint16_t queue_idx, uint16_t tx_rate)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t bcnrc_val;

	if (queue_idx >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (tx_rate != 0) {
		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
	} else {
		bcnrc_val = 0;
	}

	/*
	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(hw, TXGBE_ARBTXMMW, 0x14);

	/* Set ARBTXRATE of queue X */
	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
	txgbe_flush(hw);

	return 0;
}
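/*
 * Applications reach this through the generic ethdev helper; a sketch
 * (port_id illustrative, rate given in Mbps):
 *
 *   // cap Tx queue 0 at 1 Gbps
 *   rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */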
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
	return mc_addr;
}

int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct txgbe_hw *hw;
	u8 *mc_addr_list;

	hw = TXGBE_DEV_HW(dev);
	mc_addr_list = (u8 *)mc_addr_set;
	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 txgbe_dev_addr_list_itr, TRUE);
}

bool
txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
{
	switch (mac_type) {
	case txgbe_mac_raptor:
		return 1;
	default:
		return 0;
	}
}
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_configure = txgbe_dev_configure,
	.dev_infos_get = txgbe_dev_info_get,
	.dev_start = txgbe_dev_start,
	.dev_stop = txgbe_dev_stop,
	.dev_set_link_up = txgbe_dev_set_link_up,
	.dev_set_link_down = txgbe_dev_set_link_down,
	.dev_close = txgbe_dev_close,
	.dev_reset = txgbe_dev_reset,
	.link_update = txgbe_dev_link_update,
	.stats_get = txgbe_dev_stats_get,
	.xstats_get = txgbe_dev_xstats_get,
	.xstats_get_by_id = txgbe_dev_xstats_get_by_id,
	.stats_reset = txgbe_dev_stats_reset,
	.xstats_reset = txgbe_dev_xstats_reset,
	.xstats_get_names = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set,
	.dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
	.vlan_filter_set = txgbe_vlan_filter_set,
	.vlan_tpid_set = txgbe_vlan_tpid_set,
	.vlan_offload_set = txgbe_vlan_offload_set,
	.vlan_strip_queue_set = txgbe_vlan_strip_queue_set,
	.rx_queue_start = txgbe_dev_rx_queue_start,
	.rx_queue_stop = txgbe_dev_rx_queue_stop,
	.tx_queue_start = txgbe_dev_tx_queue_start,
	.tx_queue_stop = txgbe_dev_tx_queue_stop,
	.rx_queue_setup = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release = txgbe_dev_rx_queue_release,
	.tx_queue_setup = txgbe_dev_tx_queue_setup,
	.tx_queue_release = txgbe_dev_tx_queue_release,
	.flow_ctrl_get = txgbe_flow_ctrl_get,
	.flow_ctrl_set = txgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = txgbe_priority_flow_ctrl_set,
	.mac_addr_add = txgbe_add_rar,
	.mac_addr_remove = txgbe_remove_rar,
	.mac_addr_set = txgbe_set_default_mac_addr,
	.uc_hash_table_set = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
	.set_queue_rate_limit = txgbe_set_queue_rate_limit,
	.reta_update = txgbe_dev_rss_reta_update,
	.reta_query = txgbe_dev_rss_reta_query,
	.rss_hash_update = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
	.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
	.rxq_info_get = txgbe_rxq_info_get,
	.txq_info_get = txgbe_txq_info_get,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif