1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2013-2016 Intel Corporation
5 #include <rte_ethdev_driver.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_malloc.h>
8 #include <rte_memzone.h>
9 #include <rte_string_fns.h>
11 #include <rte_spinlock.h>
12 #include <rte_kvargs.h>
15 #include "base/fm10k_api.h"
17 /* Default delay to acquire mailbox lock */
18 #define FM10K_MBXLOCK_DELAY_US 20
19 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
21 #define MAIN_VSI_POOL_NUMBER 0
23 /* Max number of attempts to acquire switch status */
24 #define MAX_QUERY_SWITCH_STATE_TIMES 10
25 /* Wait interval between switch status queries */
26 #define WAIT_SWITCH_MSG_US 100000
27 /* Quiescence period for the switch */
28 #define FM10K_SWITCH_QUIESCE_US 100000
29 /* Number of chars per uint32 type */
30 #define CHARS_PER_UINT32 (sizeof(uint32_t))
31 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
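/* Illustrative note: CHARS_PER_UINT32 evaluates to 4 (bytes in a uint32_t),
 * so BIT_MASK_PER_UINT32 is (1 << 4) - 1 == 0xF, a 4-bit mask matching the
 * four one-byte RETA entries packed into each 32-bit register.
 */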
33 /* default 1:1 map from queue ID to interrupt vector ID */
34 #define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
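/* Example (illustrative): with the default 1:1 mapping, Q2V(pdev, 3)
 * resolves to pdev->intr_handle.intr_vec[3], i.e. the interrupt vector
 * bound to queue 3.
 */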
36 /* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
37 #define MAX_LPORT_NUM 128
38 #define GLORT_FD_Q_BASE 0x40
39 #define GLORT_PF_MASK 0xFFC0
40 #define GLORT_FD_MASK GLORT_PF_MASK
41 #define GLORT_FD_INDEX GLORT_FD_Q_BASE
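/* Illustrative note: GLORT_PF_MASK (0xFFC0) keeps the upper bits of the
 * 16-bit glort, leaving the low 6 bits to index the 64 PF/VMDQ logical
 * ports; Flow Director glorts occupy the next 64 values starting at
 * GLORT_FD_Q_BASE (0x40).
 */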
43 int fm10k_logtype_init;
44 int fm10k_logtype_driver;
46 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
47 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
48 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
49 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
50 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
51 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
53 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
54 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
55 const u8 *mac, bool add, uint32_t pool);
56 static void fm10k_tx_queue_release(void *queue);
57 static void fm10k_rx_queue_release(void *queue);
58 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
59 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
60 static int fm10k_check_ftag(struct rte_devargs *devargs);
61 static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
63 static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
64 struct rte_eth_dev_info *dev_info);
65 static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
66 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
67 struct fm10k_xstats_name_off {
68 char name[RTE_ETH_XSTATS_NAME_SIZE];
72 struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
73 {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
74 {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
75 {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
76 {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
77 {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
78 {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
79 {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
80 {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
84 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
85 sizeof(fm10k_hw_stats_strings[0]))
87 struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
88 {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
89 {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
90 {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
93 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
94 sizeof(fm10k_hw_stats_rx_q_strings[0]))
96 struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
97 {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
98 {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
101 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
102 sizeof(fm10k_hw_stats_tx_q_strings[0]))
104 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
105 (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
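/* Illustrative count: with the 3 Rx and 2 Tx per-queue counters above, the
 * driver reports FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * 5 xstats
 * entries in total.
 */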
107 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
110 fm10k_mbx_initlock(struct fm10k_hw *hw)
112 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
116 fm10k_mbx_lock(struct fm10k_hw *hw)
118 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
119 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
123 fm10k_mbx_unlock(struct fm10k_hw *hw)
125 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
128 /* Stubs needed for linkage when vPMD is disabled */
129 int __attribute__((weak))
130 fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
135 uint16_t __attribute__((weak))
137 __rte_unused void *rx_queue,
138 __rte_unused struct rte_mbuf **rx_pkts,
139 __rte_unused uint16_t nb_pkts)
144 uint16_t __attribute__((weak))
145 fm10k_recv_scattered_pkts_vec(
146 __rte_unused void *rx_queue,
147 __rte_unused struct rte_mbuf **rx_pkts,
148 __rte_unused uint16_t nb_pkts)
153 int __attribute__((weak))
154 fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
160 void __attribute__((weak))
161 fm10k_rx_queue_release_mbufs_vec(
162 __rte_unused struct fm10k_rx_queue *rxq)
167 void __attribute__((weak))
168 fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
173 int __attribute__((weak))
174 fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
179 uint16_t __attribute__((weak))
180 fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
181 __rte_unused struct rte_mbuf **tx_pkts,
182 __rte_unused uint16_t nb_pkts)
188 * reset queue to initial state, allocate software buffers used when starting
190 * return 0 on success
191 * return -ENOMEM if buffers cannot be allocated
192 * return -EINVAL if buffers do not satisfy alignment condition
195 rx_queue_reset(struct fm10k_rx_queue *q)
197 static const union fm10k_rx_desc zero = {{0} };
200 PMD_INIT_FUNC_TRACE();
202 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
206 for (i = 0; i < q->nb_desc; ++i) {
207 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
208 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
209 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
213 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
214 q->hw_ring[i].q.pkt_addr = dma_addr;
215 q->hw_ring[i].q.hdr_addr = dma_addr;
218 /* initialize extra software ring entries. Space for these extra
219 * entries is always allocated.
221 memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
222 for (i = 0; i < q->nb_fake_desc; ++i) {
223 q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
224 q->hw_ring[q->nb_desc + i] = zero;
229 q->next_trigger = q->alloc_thresh - 1;
230 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
231 q->rxrearm_start = 0;
238 * clean queue, descriptor rings, free software buffers used when stopping
242 rx_queue_clean(struct fm10k_rx_queue *q)
244 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
246 PMD_INIT_FUNC_TRACE();
248 /* zero descriptor rings */
249 for (i = 0; i < q->nb_desc; ++i)
250 q->hw_ring[i] = zero;
252 /* zero faked descriptors */
253 for (i = 0; i < q->nb_fake_desc; ++i)
254 q->hw_ring[q->nb_desc + i] = zero;
256 /* vPMD driver has a different way of releasing mbufs. */
257 if (q->rx_using_sse) {
258 fm10k_rx_queue_release_mbufs_vec(q);
262 /* free software buffers */
263 for (i = 0; i < q->nb_desc; ++i) {
265 rte_pktmbuf_free_seg(q->sw_ring[i]);
266 q->sw_ring[i] = NULL;
272 * free all queue memory used when releasing the queue (e.g. during reconfigure)
275 rx_queue_free(struct fm10k_rx_queue *q)
277 PMD_INIT_FUNC_TRACE();
279 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
282 rte_free(q->sw_ring);
291 * disable RX queue, wait until HW finishes the necessary flush operation
294 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
298 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
299 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
300 reg & ~FM10K_RXQCTL_ENABLE);
302 /* Wait 100us at most */
303 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
305 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
306 if (!(reg & FM10K_RXQCTL_ENABLE))
310 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
317 * reset queue to initial state, allocate software buffers used when starting
321 tx_queue_reset(struct fm10k_tx_queue *q)
323 PMD_INIT_FUNC_TRACE();
327 q->nb_free = q->nb_desc - 1;
328 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
329 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
333 * clean queue, descriptor rings, free software buffers used when stopping
337 tx_queue_clean(struct fm10k_tx_queue *q)
339 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
341 PMD_INIT_FUNC_TRACE();
343 /* zero descriptor rings */
344 for (i = 0; i < q->nb_desc; ++i)
345 q->hw_ring[i] = zero;
347 /* free software buffers */
348 for (i = 0; i < q->nb_desc; ++i) {
350 rte_pktmbuf_free_seg(q->sw_ring[i]);
351 q->sw_ring[i] = NULL;
357 * free all queue memory used when releasing the queue (e.g. during reconfigure)
360 tx_queue_free(struct fm10k_tx_queue *q)
362 PMD_INIT_FUNC_TRACE();
364 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
366 if (q->rs_tracker.list) {
367 rte_free(q->rs_tracker.list);
368 q->rs_tracker.list = NULL;
371 rte_free(q->sw_ring);
380 * disable TX queue, wait until HW finishes the necessary flush operation
383 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
387 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
388 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
389 reg & ~FM10K_TXDCTL_ENABLE);
391 /* Wait 100us at most */
392 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
394 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
395 if (!(reg & FM10K_TXDCTL_ENABLE))
399 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
406 fm10k_check_mq_mode(struct rte_eth_dev *dev)
408 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
409 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
410 struct rte_eth_vmdq_rx_conf *vmdq_conf;
411 uint16_t nb_rx_q = dev->data->nb_rx_queues;
413 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
415 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
416 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
420 if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
423 if (hw->mac.type == fm10k_mac_vf) {
424 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
428 /* Check VMDQ queue pool number */
429 if (vmdq_conf->nb_queue_pools >
430 sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
431 vmdq_conf->nb_queue_pools > nb_rx_q) {
432 PMD_INIT_LOG(ERR, "Too many queue pools: %d",
433 vmdq_conf->nb_queue_pools);
440 static const struct fm10k_txq_ops def_txq_ops = {
441 .reset = tx_queue_reset,
445 fm10k_dev_configure(struct rte_eth_dev *dev)
448 struct rte_eth_dev_info dev_info;
449 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
451 PMD_INIT_FUNC_TRACE();
453 if ((rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0)
454 PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
456 fm10k_dev_infos_get(dev, &dev_info);
457 if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
458 PMD_DRV_LOG(ERR, "Some Rx offloads are not supported: "
459 "requested 0x%" PRIx64 " supported 0x%" PRIx64,
460 rx_offloads, dev_info.rx_offload_capa);
463 /* multiple queue mode checking */
464 ret = fm10k_check_mq_mode(dev);
466 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
471 dev->data->scattered_rx = 0;
476 /* fls = find last set bit = 32 minus the number of leading zeros */
478 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
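/* Worked examples (illustrative): fls(0) == 0, fls(1) == 1, fls(8) == 4.
 * It is used below to size glort bit fields: fls(n - 1) bits are enough
 * to address n entries when n is a power of two.
 */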
482 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
484 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
485 struct rte_eth_vmdq_rx_conf *vmdq_conf;
488 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
490 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
491 if (!vmdq_conf->pool_map[i].pools)
494 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
495 fm10k_mbx_unlock(hw);
500 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
502 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
504 /* Add default mac address */
505 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
506 MAIN_VSI_POOL_NUMBER);
510 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
512 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
513 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
514 uint32_t mrqc, *key, i, reta, j;
517 #define RSS_KEY_SIZE 40
518 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
519 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
520 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
521 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
522 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
523 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
526 if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
527 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
528 FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
532 /* RSS key is rss_intel_key (default) or user-provided (rss_key) */
533 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
534 key = (uint32_t *)rss_intel_key;
536 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
538 /* Now fill our hash function seeds, 4 bytes at a time */
539 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
540 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
543 * Fill in redirection table
544 * The byte-swap is needed because NIC registers are in
545 * little-endian order.
548 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
549 if (j == dev->data->nb_rx_queues)
551 reta = (reta << CHAR_BIT) | j;
553 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
558 * Generate RSS hash based on packet types, TCP/UDP
559 * port numbers and/or IPv4/v6 src and dst addresses
561 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
563 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
564 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
565 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
566 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
567 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
568 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
569 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
570 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
571 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
574 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
579 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
583 fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
585 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
588 for (i = 0; i < nb_lport_new; i++) {
589 /* Set unicast mode by default. The application can change
590 * it to another mode via other API functions.
593 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
594 FM10K_XCAST_MODE_NONE);
595 fm10k_mbx_unlock(hw);
600 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
602 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
603 struct rte_eth_vmdq_rx_conf *vmdq_conf;
604 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
605 struct fm10k_macvlan_filter_info *macvlan;
606 uint16_t nb_queue_pools = 0; /* pool number in configuration */
607 uint16_t nb_lport_new;
609 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
610 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
612 fm10k_dev_rss_configure(dev);
614 /* only PF supports VMDQ */
615 if (hw->mac.type != fm10k_mac_pf)
618 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
619 nb_queue_pools = vmdq_conf->nb_queue_pools;
621 /* no pool number change, no need to update logic port and VLAN/MAC */
622 if (macvlan->nb_queue_pools == nb_queue_pools)
625 nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
626 fm10k_dev_logic_port_update(dev, nb_lport_new);
628 /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
629 memset(dev->data->mac_addrs, 0,
630 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
631 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
632 &dev->data->mac_addrs[0]);
633 memset(macvlan, 0, sizeof(*macvlan));
634 macvlan->nb_queue_pools = nb_queue_pools;
637 fm10k_dev_vmdq_rx_configure(dev);
639 fm10k_dev_pf_main_vsi_reset(dev);
643 fm10k_dev_tx_init(struct rte_eth_dev *dev)
645 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
647 struct fm10k_tx_queue *txq;
651 /* Disable TXINT to avoid possible interrupt */
652 for (i = 0; i < hw->mac.max_queues; i++)
653 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
654 3 << FM10K_TXINT_TIMER_SHIFT);
657 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
658 txq = dev->data->tx_queues[i];
659 base_addr = txq->hw_ring_phys_addr;
660 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
662 /* disable queue to avoid issues while updating state */
663 ret = tx_queue_disable(hw, i);
665 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
668 /* Enable use of FTAG bit in TX descriptor, PFVTCTL
669 * register is read-only for VF.
671 if (fm10k_check_ftag(dev->device->devargs)) {
672 if (hw->mac.type == fm10k_mac_pf) {
673 FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
674 FM10K_PFVTCTL_FTAG_DESC_ENABLE);
675 PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
677 PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
682 /* set location and size for descriptor ring */
683 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
684 base_addr & UINT64_LOWER_32BITS_MASK);
685 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
686 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
687 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
689 /* assign default SGLORT for each TX queue by PF */
690 if (hw->mac.type == fm10k_mac_pf)
691 FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
694 /* set up vector or scalar TX function as appropriate */
695 fm10k_set_tx_function(dev);
701 fm10k_dev_rx_init(struct rte_eth_dev *dev)
703 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
704 struct fm10k_macvlan_filter_info *macvlan;
705 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
706 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
708 struct fm10k_rx_queue *rxq;
711 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
712 uint32_t logic_port = hw->mac.dglort_map;
714 uint16_t queue_stride = 0;
716 /* enable RXINT for interrupt mode */
718 if (rte_intr_dp_is_en(intr_handle)) {
719 for (; i < dev->data->nb_rx_queues; i++) {
720 FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
721 if (hw->mac.type == fm10k_mac_pf)
722 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
724 FM10K_ITR_MASK_CLEAR);
726 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
728 FM10K_ITR_MASK_CLEAR);
731 /* Disable other RXINT to avoid possible interrupt */
732 for (; i < hw->mac.max_queues; i++)
733 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
734 3 << FM10K_RXINT_TIMER_SHIFT);
736 /* Setup RX queues */
737 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
738 rxq = dev->data->rx_queues[i];
739 base_addr = rxq->hw_ring_phys_addr;
740 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
742 /* disable queue to avoid issues while updating state */
743 ret = rx_queue_disable(hw, i);
745 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
749 /* Setup the Base and Length of the Rx Descriptor Ring */
750 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
751 base_addr & UINT64_LOWER_32BITS_MASK);
752 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
753 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
754 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
756 /* Configure the Rx buffer size for one buffer without split */
757 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
758 RTE_PKTMBUF_HEADROOM);
759 /* As RX buffer is aligned to 512B within mbuf, some bytes are
760 * reserved for this purpose, and the worst case could be 511B.
761 * But SRR reg assumes all buffers have the same size. In order
762 * to fill the gap, we'll have to consider the worst case and
763 * assume 512B is reserved. If we don't do so, it's possible
764 * for HW to overwrite data into the next mbuf.
766 buf_size -= FM10K_RX_DATABUF_ALIGN;
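/* Illustrative sizing, assuming the common 2KB data room: with
 * rte_pktmbuf_data_room_size() == 2048 + RTE_PKTMBUF_HEADROOM,
 * buf_size starts at 2048 and drops to 1536 bytes once the
 * worst-case 512B alignment slack is reserved.
 */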
768 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
769 (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
770 FM10K_SRRCTL_LOOPBACK_SUPPRESS);
772 /* Add dual VLAN tag length to support dual VLAN */
773 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
774 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
775 rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
777 dev->data->scattered_rx = 1;
778 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
779 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
780 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
783 /* Enable drop on empty; this bit is read-only for VF */
784 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
785 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
787 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
788 FM10K_WRITE_FLUSH(hw);
791 /* Configure VMDQ/RSS if applicable */
792 fm10k_dev_mq_rx_configure(dev);
794 /* Decide the best RX function */
795 fm10k_set_rx_function(dev);
797 /* update RX_SGLORT for loopback suppression */
798 if (hw->mac.type != fm10k_mac_pf)
800 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
801 if (macvlan->nb_queue_pools)
802 queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
803 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
804 if (i && queue_stride && !(i % queue_stride))
806 FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
813 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
815 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
818 struct fm10k_rx_queue *rxq;
820 PMD_INIT_FUNC_TRACE();
822 if (rx_queue_id < dev->data->nb_rx_queues) {
823 rxq = dev->data->rx_queues[rx_queue_id];
824 err = rx_queue_reset(rxq);
825 if (err == -ENOMEM) {
826 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
828 } else if (err == -EINVAL) {
829 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
834 /* Setup the HW Rx Head and Tail Descriptor Pointers
835 * Note: this must be done AFTER the queue is enabled on real
836 * hardware, but BEFORE the queue is enabled when using the
837 * emulation platform. Do it in both places for now and remove
838 * this comment and the following two register writes when the
839 * emulation platform is no longer being used.
841 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
842 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
844 /* Set PF ownership flag for PF devices */
845 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
846 if (hw->mac.type == fm10k_mac_pf)
847 reg |= FM10K_RXQCTL_PF;
848 reg |= FM10K_RXQCTL_ENABLE;
849 /* enable RX queue */
850 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
851 FM10K_WRITE_FLUSH(hw);
853 /* Setup the HW Rx Head and Tail Descriptor Pointers
854 * Note: this must be done AFTER the queue is enabled
856 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
857 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
858 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
865 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
867 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
869 PMD_INIT_FUNC_TRACE();
871 if (rx_queue_id < dev->data->nb_rx_queues) {
872 /* Disable RX queue */
873 rx_queue_disable(hw, rx_queue_id);
875 /* Free mbuf and clean HW ring */
876 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
877 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
884 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
886 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
887 /** @todo - this should be defined in the shared code */
888 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
889 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
892 PMD_INIT_FUNC_TRACE();
894 if (tx_queue_id < dev->data->nb_tx_queues) {
895 struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
899 /* reset head and tail pointers */
900 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
901 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
903 /* enable TX queue */
904 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
905 FM10K_TXDCTL_ENABLE | txdctl);
906 FM10K_WRITE_FLUSH(hw);
907 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
915 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
917 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
919 PMD_INIT_FUNC_TRACE();
921 if (tx_queue_id < dev->data->nb_tx_queues) {
922 tx_queue_disable(hw, tx_queue_id);
923 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
924 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
930 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
932 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
933 != FM10K_DGLORTMAP_NONE);
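/* Note (illustrative): dglort_map stays at FM10K_DGLORTMAP_NONE until the
 * switch manager assigns a real glort range over the mailbox, so callers
 * use this check before programming lport state.
 */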
937 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
939 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
942 PMD_INIT_FUNC_TRACE();
944 /* Return if a valid glort range has not been acquired */
945 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
949 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
950 FM10K_XCAST_MODE_PROMISC);
951 fm10k_mbx_unlock(hw);
953 if (status != FM10K_SUCCESS)
954 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
958 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
960 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
964 PMD_INIT_FUNC_TRACE();
966 /* Return if a valid glort range has not been acquired */
967 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
970 if (dev->data->all_multicast == 1)
971 mode = FM10K_XCAST_MODE_ALLMULTI;
973 mode = FM10K_XCAST_MODE_NONE;
976 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
978 fm10k_mbx_unlock(hw);
980 if (status != FM10K_SUCCESS)
981 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
985 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
987 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
990 PMD_INIT_FUNC_TRACE();
992 /* Return if a valid glort range has not been acquired */
993 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
996 /* If promiscuous mode is enabled, it doesn't make sense to enable
997 * allmulticast and disable promiscuous since fm10k can only select
1000 if (dev->data->promiscuous) {
1001 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
1002 "needn't enable allmulticast");
1007 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1008 FM10K_XCAST_MODE_ALLMULTI);
1009 fm10k_mbx_unlock(hw);
1011 if (status != FM10K_SUCCESS)
1012 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
1016 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
1018 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1021 PMD_INIT_FUNC_TRACE();
1023 /* Return if a valid glort range has not been acquired */
1024 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1027 if (dev->data->promiscuous) {
1028 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
1029 "since promisc mode is enabled");
1034 /* Change mode to unicast mode */
1035 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1036 FM10K_XCAST_MODE_NONE);
1037 fm10k_mbx_unlock(hw);
1039 if (status != FM10K_SUCCESS)
1040 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
1044 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
1046 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1047 uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
1048 uint16_t nb_queue_pools;
1049 struct fm10k_macvlan_filter_info *macvlan;
1051 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1052 nb_queue_pools = macvlan->nb_queue_pools;
1053 pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
1054 rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
1056 /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
1057 dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
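/* Worked example (illustrative): with 4 VMDQ pools and 16 Rx queues,
 * pool_len = fls(3) = 2 and rss_len = fls(15) - 2 = 2, so each pool
 * spans 2^2 = 4 RSS queues and dglortdec encodes both field widths.
 */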
1058 dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1060 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1061 /* Configure VMDQ/RSS DGlort Decoder */
1062 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
1064 /* Flow Director configuration; only the queue number is valid. */
1065 dglortdec = fls(dev->data->nb_rx_queues - 1);
1066 dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1067 (hw->mac.dglort_map + GLORT_FD_Q_BASE);
1068 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1069 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1071 /* Invalidate all other GLORT entries */
1072 for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1073 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1074 FM10K_DGLORTMAP_NONE);
1077 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
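/* Illustrative note: BSIZEPKT_ROUNDUP is the round-up slack for the SRRCTL
 * buffer-size granularity, e.g. (size + BSIZEPKT_ROUNDUP) & ~BSIZEPKT_ROUNDUP
 * aligns size up to a multiple of (1 << FM10K_SRRCTL_BSIZEPKT_SHIFT).
 */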
1079 fm10k_dev_start(struct rte_eth_dev *dev)
1081 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1084 PMD_INIT_FUNC_TRACE();
1086 /* stop, init, then start the hw */
1087 diag = fm10k_stop_hw(hw);
1088 if (diag != FM10K_SUCCESS) {
1089 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1093 diag = fm10k_init_hw(hw);
1094 if (diag != FM10K_SUCCESS) {
1095 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1099 diag = fm10k_start_hw(hw);
1100 if (diag != FM10K_SUCCESS) {
1101 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1105 diag = fm10k_dev_tx_init(dev);
1107 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1111 if (fm10k_dev_rxq_interrupt_setup(dev))
1114 diag = fm10k_dev_rx_init(dev);
1116 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1120 if (hw->mac.type == fm10k_mac_pf)
1121 fm10k_dev_dglort_map_configure(dev);
1123 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1124 struct fm10k_rx_queue *rxq;
1125 rxq = dev->data->rx_queues[i];
1127 if (rxq->rx_deferred_start)
1129 diag = fm10k_dev_rx_queue_start(dev, i);
1132 for (j = 0; j < i; ++j)
1133 rx_queue_clean(dev->data->rx_queues[j]);
1138 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1139 struct fm10k_tx_queue *txq;
1140 txq = dev->data->tx_queues[i];
1142 if (txq->tx_deferred_start)
1144 diag = fm10k_dev_tx_queue_start(dev, i);
1147 for (j = 0; j < i; ++j)
1148 tx_queue_clean(dev->data->tx_queues[j]);
1149 for (j = 0; j < dev->data->nb_rx_queues; ++j)
1150 rx_queue_clean(dev->data->rx_queues[j]);
1155 /* Update default VLAN when not in VMDQ mode */
1156 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1157 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1159 fm10k_link_update(dev, 0);
1165 fm10k_dev_stop(struct rte_eth_dev *dev)
1167 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1168 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1169 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
1172 PMD_INIT_FUNC_TRACE();
1174 if (dev->data->tx_queues)
1175 for (i = 0; i < dev->data->nb_tx_queues; i++)
1176 fm10k_dev_tx_queue_stop(dev, i);
1178 if (dev->data->rx_queues)
1179 for (i = 0; i < dev->data->nb_rx_queues; i++)
1180 fm10k_dev_rx_queue_stop(dev, i);
1182 /* Disable datapath event */
1183 if (rte_intr_dp_is_en(intr_handle)) {
1184 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1185 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1186 3 << FM10K_RXINT_TIMER_SHIFT);
1187 if (hw->mac.type == fm10k_mac_pf)
1188 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
1189 FM10K_ITR_MASK_SET);
1191 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
1192 FM10K_ITR_MASK_SET);
1195 /* Clean datapath event and queue/vec mapping */
1196 rte_intr_efd_disable(intr_handle);
1197 rte_free(intr_handle->intr_vec);
1198 intr_handle->intr_vec = NULL;
1202 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1206 PMD_INIT_FUNC_TRACE();
1208 if (dev->data->tx_queues) {
1209 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1210 struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1216 if (dev->data->rx_queues) {
1217 for (i = 0; i < dev->data->nb_rx_queues; i++)
1218 fm10k_rx_queue_release(dev->data->rx_queues[i]);
1223 fm10k_dev_close(struct rte_eth_dev *dev)
1225 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1227 PMD_INIT_FUNC_TRACE();
1230 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1231 MAX_LPORT_NUM, false);
1232 fm10k_mbx_unlock(hw);
1234 /* allow 100ms for device to quiesce */
1235 rte_delay_us(FM10K_SWITCH_QUIESCE_US);
1237 /* Stop mailbox service first */
1238 fm10k_close_mbx_service(hw);
1239 fm10k_dev_stop(dev);
1240 fm10k_dev_queue_release(dev);
1245 fm10k_link_update(struct rte_eth_dev *dev,
1246 __rte_unused int wait_to_complete)
1248 struct fm10k_dev_info *dev_info =
1249 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1250 PMD_INIT_FUNC_TRACE();
1252 /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
1253 * leave the speed undefined since there is no 50Gbps Ethernet.
1255 dev->data->dev_link.link_speed = 0;
1256 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1257 dev->data->dev_link.link_status =
1258 dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
1263 static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1264 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
1269 if (xstats_names != NULL) {
1270 /* Note: limit checked in rte_eth_xstats_names() */
1273 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1274 snprintf(xstats_names[count].name,
1275 sizeof(xstats_names[count].name),
1276 "%s", fm10k_hw_stats_strings[count].name);
1280 /* PF queue stats */
1281 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1282 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1283 snprintf(xstats_names[count].name,
1284 sizeof(xstats_names[count].name),
1286 fm10k_hw_stats_rx_q_strings[i].name);
1289 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1290 snprintf(xstats_names[count].name,
1291 sizeof(xstats_names[count].name),
1293 fm10k_hw_stats_tx_q_strings[i].name);
1298 return FM10K_NB_XSTATS;
1302 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1305 struct fm10k_hw_stats *hw_stats =
1306 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1307 unsigned i, q, count = 0;
1309 if (n < FM10K_NB_XSTATS)
1310 return FM10K_NB_XSTATS;
1313 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1314 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1315 fm10k_hw_stats_strings[count].offset);
1316 xstats[count].id = count;
1320 /* PF queue stats */
1321 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1322 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1323 xstats[count].value =
1324 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1325 fm10k_hw_stats_rx_q_strings[i].offset);
1326 xstats[count].id = count;
1329 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1330 xstats[count].value =
1331 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1332 fm10k_hw_stats_tx_q_strings[i].offset);
1333 xstats[count].id = count;
1338 return FM10K_NB_XSTATS;
1342 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1344 uint64_t ipackets, opackets, ibytes, obytes;
1345 struct fm10k_hw *hw =
1346 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1347 struct fm10k_hw_stats *hw_stats =
1348 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1351 PMD_INIT_FUNC_TRACE();
1353 fm10k_update_hw_stats(hw, hw_stats);
1355 ipackets = opackets = ibytes = obytes = 0;
1356 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1357 (i < hw->mac.max_queues); ++i) {
1358 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1359 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1360 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
1361 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
1362 ipackets += stats->q_ipackets[i];
1363 opackets += stats->q_opackets[i];
1364 ibytes += stats->q_ibytes[i];
1365 obytes += stats->q_obytes[i];
1367 stats->ipackets = ipackets;
1368 stats->opackets = opackets;
1369 stats->ibytes = ibytes;
1370 stats->obytes = obytes;
1375 fm10k_stats_reset(struct rte_eth_dev *dev)
1377 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1378 struct fm10k_hw_stats *hw_stats =
1379 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1381 PMD_INIT_FUNC_TRACE();
1383 memset(hw_stats, 0, sizeof(*hw_stats));
1384 fm10k_rebind_hw_stats(hw, hw_stats);
1388 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1389 struct rte_eth_dev_info *dev_info)
1391 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1392 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1394 PMD_INIT_FUNC_TRACE();
1396 dev_info->pci_dev = pdev;
1397 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
1398 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
1399 dev_info->max_rx_queues = hw->mac.max_queues;
1400 dev_info->max_tx_queues = hw->mac.max_queues;
1401 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
1402 dev_info->max_hash_mac_addrs = 0;
1403 dev_info->max_vfs = pdev->max_vfs;
1404 dev_info->vmdq_pool_base = 0;
1405 dev_info->vmdq_queue_base = 0;
1406 dev_info->max_vmdq_pools = ETH_32_POOLS;
1407 dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
1408 dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
1409 dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
1410 dev_info->rx_queue_offload_capa;
1411 dev_info->tx_offload_capa =
1412 DEV_TX_OFFLOAD_VLAN_INSERT |
1413 DEV_TX_OFFLOAD_IPV4_CKSUM |
1414 DEV_TX_OFFLOAD_UDP_CKSUM |
1415 DEV_TX_OFFLOAD_TCP_CKSUM |
1416 DEV_TX_OFFLOAD_TCP_TSO;
1418 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1419 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1421 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1423 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1424 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1425 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1427 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1432 dev_info->default_txconf = (struct rte_eth_txconf) {
1434 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1435 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1436 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1438 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1439 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1440 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1443 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1444 .nb_max = FM10K_MAX_RX_DESC,
1445 .nb_min = FM10K_MIN_RX_DESC,
1446 .nb_align = FM10K_MULT_RX_DESC,
1449 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1450 .nb_max = FM10K_MAX_TX_DESC,
1451 .nb_min = FM10K_MIN_TX_DESC,
1452 .nb_align = FM10K_MULT_TX_DESC,
1453 .nb_seg_max = FM10K_TX_MAX_SEG,
1454 .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1457 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1458 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1459 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1462 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1463 static const uint32_t *
1464 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1466 if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1467 dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1468 static uint32_t ptypes[] = {
1469 /* refers to rx_desc_to_ol_flags() */
1472 RTE_PTYPE_L3_IPV4_EXT,
1474 RTE_PTYPE_L3_IPV6_EXT,
1481 } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1482 dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1483 static uint32_t ptypes_vec[] = {
1484 /* refers to fm10k_desc_to_pktype_v() */
1486 RTE_PTYPE_L3_IPV4_EXT,
1488 RTE_PTYPE_L3_IPV6_EXT,
1491 RTE_PTYPE_TUNNEL_GENEVE,
1492 RTE_PTYPE_TUNNEL_NVGRE,
1493 RTE_PTYPE_TUNNEL_VXLAN,
1494 RTE_PTYPE_TUNNEL_GRE,
1504 static const uint32_t *
1505 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1512 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1515 uint16_t mac_num = 0;
1516 uint32_t vid_idx, vid_bit, mac_index;
1517 struct fm10k_hw *hw;
1518 struct fm10k_macvlan_filter_info *macvlan;
1519 struct rte_eth_dev_data *data = dev->data;
1521 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1522 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1524 if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1525 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1529 if (vlan_id > ETH_VLAN_ID_MAX) {
1530 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1534 vid_idx = FM10K_VFTA_IDX(vlan_id);
1535 vid_bit = FM10K_VFTA_BIT(vlan_id);
1536 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1537 if (on && (macvlan->vfta[vid_idx] & vid_bit))
1539 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1540 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1541 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1542 "in the VLAN filter table");
1547 result = fm10k_update_vlan(hw, vlan_id, 0, on);
1548 fm10k_mbx_unlock(hw);
1549 if (result != FM10K_SUCCESS) {
1550 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1554 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1555 (result == FM10K_SUCCESS); mac_index++) {
1556 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1558 if (mac_num > macvlan->mac_num - 1) {
1559 PMD_INIT_LOG(ERR, "MAC address number "
1564 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1565 data->mac_addrs[mac_index].addr_bytes,
1567 fm10k_mbx_unlock(hw);
1570 if (result != FM10K_SUCCESS) {
1571 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1576 macvlan->vlan_num++;
1577 macvlan->vfta[vid_idx] |= vid_bit;
1579 macvlan->vlan_num--;
1580 macvlan->vfta[vid_idx] &= ~vid_bit;
1586 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1588 if (mask & ETH_VLAN_STRIP_MASK) {
1589 if (!(dev->data->dev_conf.rxmode.offloads &
1590 DEV_RX_OFFLOAD_VLAN_STRIP))
1591 PMD_INIT_LOG(ERR, "VLAN stripping is "
1592 "always on in fm10k");
1595 if (mask & ETH_VLAN_EXTEND_MASK) {
1596 if (dev->data->dev_conf.rxmode.offloads &
1597 DEV_RX_OFFLOAD_VLAN_EXTEND)
1598 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1599 "supported in fm10k");
1602 if (mask & ETH_VLAN_FILTER_MASK) {
1603 if (!(dev->data->dev_conf.rxmode.offloads &
1604 DEV_RX_OFFLOAD_VLAN_FILTER))
1605 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1611 /* Add/Remove a MAC address, and update filters to main VSI */
1612 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1613 const u8 *mac, bool add, uint32_t pool)
1615 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1616 struct fm10k_macvlan_filter_info *macvlan;
1619 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1621 if (pool != MAIN_VSI_POOL_NUMBER) {
1622 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1623 "mac to pool %u", pool);
1626 for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1627 if (!macvlan->vfta[j])
1629 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1630 if (!(macvlan->vfta[j] & (1 << k)))
1632 if (i + 1 > macvlan->vlan_num) {
1633 PMD_INIT_LOG(ERR, "VLAN number does not match");
1637 fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1638 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1639 fm10k_mbx_unlock(hw);
1645 /* Add/Remove a MAC address, and update filters to VMDQ */
1646 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1647 const u8 *mac, bool add, uint32_t pool)
1649 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1650 struct fm10k_macvlan_filter_info *macvlan;
1651 struct rte_eth_vmdq_rx_conf *vmdq_conf;
1654 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1655 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1657 if (pool > macvlan->nb_queue_pools) {
1658 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1660 pool, macvlan->nb_queue_pools);
1663 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1664 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1667 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1668 vmdq_conf->pool_map[i].vlan_id, add, 0);
1669 fm10k_mbx_unlock(hw);
1673 /* Add/Remove a MAC address, and update filters */
1674 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1675 const u8 *mac, bool add, uint32_t pool)
1677 struct fm10k_macvlan_filter_info *macvlan;
1679 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1681 if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1682 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1684 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1692 /* Add a MAC address, and update filters */
1694 fm10k_macaddr_add(struct rte_eth_dev *dev,
1695 struct ether_addr *mac_addr,
1699 struct fm10k_macvlan_filter_info *macvlan;
1701 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1702 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1703 macvlan->mac_vmdq_id[index] = pool;
1707 /* Remove a MAC address, and update filters */
1709 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1711 struct rte_eth_dev_data *data = dev->data;
1712 struct fm10k_macvlan_filter_info *macvlan;
1714 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1715 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1716 FALSE, macvlan->mac_vmdq_id[index]);
1717 macvlan->mac_vmdq_id[index] = 0;
1721 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1723 if ((request < min) || (request > max) || ((request % mult) != 0))
1731 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1733 if ((request < min) || (request > max) || ((div % request) != 0))
1740 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1742 uint16_t rx_free_thresh;
1744 if (conf->rx_free_thresh == 0)
1745 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1747 rx_free_thresh = conf->rx_free_thresh;
1749 /* make sure the requested threshold satisfies the constraints */
1750 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1751 FM10K_RX_FREE_THRESH_MAX(q),
1752 FM10K_RX_FREE_THRESH_DIV(q),
1754 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1755 "less than or equal to %u, "
1756 "greater than or equal to %u, "
1757 "and a divisor of %u",
1758 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1759 FM10K_RX_FREE_THRESH_MIN(q),
1760 FM10K_RX_FREE_THRESH_DIV(q));
1764 q->alloc_thresh = rx_free_thresh;
1765 q->drop_en = conf->rx_drop_en;
1766 q->rx_deferred_start = conf->rx_deferred_start;
1772 * Hardware requires specific alignment for Rx packet buffers. At
1773 * least one of the following two conditions must be satisfied.
1774 * 1. Address is 512B aligned
1775 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1777 * As such, the driver may need to adjust the DMA address within the
1778 * buffer by up to 512B.
1780 * return 1 if the element size is valid, otherwise return 0.
1783 mempool_element_size_valid(struct rte_mempool *mp)
1787 /* elt_size includes mbuf header and headroom */
1788 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1789 RTE_PKTMBUF_HEADROOM;
1791 /* account for up to 512B of alignment */
1792 min_size -= FM10K_RX_DATABUF_ALIGN;
1794 /* sanity check for unsigned underflow in the subtractions above */
1795 if (min_size > mp->elt_size)
1802 static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1806 return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
1809 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1813 return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
1814 DEV_RX_OFFLOAD_VLAN_FILTER |
1815 DEV_RX_OFFLOAD_IPV4_CKSUM |
1816 DEV_RX_OFFLOAD_UDP_CKSUM |
1817 DEV_RX_OFFLOAD_TCP_CKSUM |
1818 DEV_RX_OFFLOAD_JUMBO_FRAME |
1819 DEV_RX_OFFLOAD_CRC_STRIP |
1820 DEV_RX_OFFLOAD_HEADER_SPLIT);
1824 fm10k_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
1826 uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
1827 uint64_t queue_supported = fm10k_get_rx_queue_offloads_capa(dev);
1828 uint64_t port_supported = fm10k_get_rx_port_offloads_capa(dev);
1830 if ((requested & (queue_supported | port_supported)) != requested)
1833 if ((port_offloads ^ requested) & port_supported)
1840 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1841 uint16_t nb_desc, unsigned int socket_id,
1842 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1844 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1845 struct fm10k_dev_info *dev_info =
1846 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1847 struct fm10k_rx_queue *q;
1848 const struct rte_memzone *mz;
1850 PMD_INIT_FUNC_TRACE();
1852 if (!fm10k_check_rx_queue_offloads(dev, conf->offloads)) {
1853 PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
1854 " don't match port offloads 0x%" PRIx64
1855 " or supported port offloads 0x%" PRIx64
1856 " or supported queue offloads 0x%" PRIx64,
1857 (void *)dev, conf->offloads,
1858 dev->data->dev_conf.rxmode.offloads,
1859 fm10k_get_rx_port_offloads_capa(dev),
1860 fm10k_get_rx_queue_offloads_capa(dev));
1864 /* make sure the mempool element size can account for alignment. */
1865 if (!mempool_element_size_valid(mp)) {
1866 PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
1870 /* make sure a valid number of descriptors have been requested */
1871 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1872 FM10K_MULT_RX_DESC, nb_desc)) {
1873 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1874 "less than or equal to %"PRIu32", "
1875 "greater than or equal to %u, "
1876 "and a multiple of %u",
1877 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1878 FM10K_MULT_RX_DESC);
1883 * if this queue existed already, free the associated memory. The
1884 * queue cannot be reused in case we need to allocate memory on
1885 * a different socket than was previously used.
1887 if (dev->data->rx_queues[queue_id] != NULL) {
1888 rx_queue_free(dev->data->rx_queues[queue_id]);
1889 dev->data->rx_queues[queue_id] = NULL;
1892 /* allocate memory for the queue structure */
1893 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1896 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1902 q->nb_desc = nb_desc;
1903 q->nb_fake_desc = FM10K_MULT_RX_DESC;
1904 q->port_id = dev->data->port_id;
1905 q->queue_id = queue_id;
1906 q->tail_ptr = (volatile uint32_t *)
1907 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1908 q->offloads = conf->offloads;
1909 if (handle_rxconf(q, conf))
1912 /* allocate memory for the software ring */
1913 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1914 (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1915 RTE_CACHE_LINE_SIZE, socket_id);
1916 if (q->sw_ring == NULL) {
1917 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1923 * allocate memory for the hardware descriptor ring. A memzone large
1924 * enough to hold the maximum ring size is requested to allow for
1925 * resizing in later calls to the queue setup function.
1927 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1928 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1931 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1932 rte_free(q->sw_ring);
1936 q->hw_ring = mz->addr;
1937 q->hw_ring_phys_addr = mz->iova;
1939 /* Check if the number of descriptors satisfies the vector Rx requirement */
1940 if (!rte_is_power_of_2(nb_desc)) {
1941 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1942 "preconditions - canceling the feature for "
1943 "the whole port[%d]",
1944 q->queue_id, q->port_id);
1945 dev_info->rx_vec_allowed = false;
1947 fm10k_rxq_vec_setup(q);
1949 dev->data->rx_queues[queue_id] = q;
1954 fm10k_rx_queue_release(void *queue)
1956 PMD_INIT_FUNC_TRACE();
1958 rx_queue_free(queue);
1962 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1964 uint16_t tx_free_thresh;
1965 uint16_t tx_rs_thresh;
1967 /* constraint macros require that tx_free_thresh is configured
1968 * before tx_rs_thresh */
1969 if (conf->tx_free_thresh == 0)
1970 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1972 tx_free_thresh = conf->tx_free_thresh;
1974 /* make sure the requested threshold satisfies the constraints */
1975 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1976 FM10K_TX_FREE_THRESH_MAX(q),
1977 FM10K_TX_FREE_THRESH_DIV(q),
1979 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1980 "less than or equal to %u, "
1981 "greater than or equal to %u, "
1982 "and a divisor of %u",
1983 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1984 FM10K_TX_FREE_THRESH_MIN(q),
1985 FM10K_TX_FREE_THRESH_DIV(q));
1989 q->free_thresh = tx_free_thresh;
1991 if (conf->tx_rs_thresh == 0)
1992 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1994 tx_rs_thresh = conf->tx_rs_thresh;
1996 q->tx_deferred_start = conf->tx_deferred_start;
1998 /* make sure the requested threshold satisfies the constraints */
1999 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
2000 FM10K_TX_RS_THRESH_MAX(q),
2001 FM10K_TX_RS_THRESH_DIV(q),
2003 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
2004 "less than or equal to %u, "
2005 "greater than or equal to %u, "
2006 "and a divisor of %u",
2007 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
2008 FM10K_TX_RS_THRESH_MIN(q),
2009 FM10K_TX_RS_THRESH_DIV(q));
2013 q->rs_thresh = tx_rs_thresh;
2019 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
2020 uint16_t nb_desc, unsigned int socket_id,
2021 const struct rte_eth_txconf *conf)
2023 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2024 struct fm10k_tx_queue *q;
2025 const struct rte_memzone *mz;
2027 PMD_INIT_FUNC_TRACE();
2029 /* make sure a valid number of descriptors have been requested */
2030 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
2031 FM10K_MULT_TX_DESC, nb_desc)) {
2032 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
2033 "less than or equal to %"PRIu32", "
2034 "greater than or equal to %u, "
2035 "and a multiple of %u",
2036 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
2037 FM10K_MULT_TX_DESC);
2042 * if this queue existed already, free the associated memory. The
2043 * queue cannot be reused in case we need to allocate memory on
2044 * a different socket than was previously used.
2046 if (dev->data->tx_queues[queue_id] != NULL) {
2047 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
2050 dev->data->tx_queues[queue_id] = NULL;
2053 /* allocate memory for the queue structure */
2054 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
2057 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
2062 q->nb_desc = nb_desc;
2063 q->port_id = dev->data->port_id;
2064 q->queue_id = queue_id;
2065 q->txq_flags = conf->txq_flags;
2066 q->ops = &def_txq_ops;
2067 q->tail_ptr = (volatile uint32_t *)
2068 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2069 if (handle_txconf(q, conf))
2072 /* allocate memory for the software ring */
2073 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2074 nb_desc * sizeof(struct rte_mbuf *),
2075 RTE_CACHE_LINE_SIZE, socket_id);
2076 if (q->sw_ring == NULL) {
2077 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2083 * allocate memory for the hardware descriptor ring. A memzone large
2084 * enough to hold the maximum ring size is requested to allow for
2085 * resizing in later calls to the queue setup function.
2087 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2088 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2091 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2092 rte_free(q->sw_ring);
2096 q->hw_ring = mz->addr;
2097 q->hw_ring_phys_addr = mz->iova;
2100 * allocate memory for the RS bit tracker. Enough slots to hold the
2101 * descriptor index for each RS bit needing to be set are required.
2103 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2104 ((nb_desc + 1) / q->rs_thresh) *
2106 RTE_CACHE_LINE_SIZE, socket_id);
2107 if (q->rs_tracker.list == NULL) {
2108 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2109 rte_free(q->sw_ring);
2114 dev->data->tx_queues[queue_id] = q;
2119 fm10k_tx_queue_release(void *queue)
2121 struct fm10k_tx_queue *q = queue;
2122 PMD_INIT_FUNC_TRACE();
2128 fm10k_reta_update(struct rte_eth_dev *dev,
2129 struct rte_eth_rss_reta_entry64 *reta_conf,
2132 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2133 uint16_t i, j, idx, shift;
2137 PMD_INIT_FUNC_TRACE();
2139 if (reta_size > FM10K_MAX_RSS_INDICES) {
2140 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2141 "(%d) doesn't match the number hardware can supported "
2142 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2147 * Update Redirection Table RETA[n], n=0..31. The redirection table has
2148 * 128 entries in 32 registers
2150 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2151 idx = i / RTE_RETA_GROUP_SIZE;
2152 shift = i % RTE_RETA_GROUP_SIZE;
2153 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2154 BIT_MASK_PER_UINT32);
2159 if (mask != BIT_MASK_PER_UINT32)
2160 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2162 for (j = 0; j < CHARS_PER_UINT32; j++) {
2163 if (mask & (0x1 << j)) {
2165 reta &= ~(UINT8_MAX << CHAR_BIT * j);
2166 reta |= reta_conf[idx].reta[shift + j] <<
2170 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
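/* Worked example (illustrative): four one-byte entries pack into each
 * register, so RETA entries {1, 2, 3, 4} for one register produce
 * reta == 0x04030201 after the per-byte masking and shifting above.
 */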
2177 fm10k_reta_query(struct rte_eth_dev *dev,
2178 struct rte_eth_rss_reta_entry64 *reta_conf,
2181 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2182 uint16_t i, j, idx, shift;
2186 PMD_INIT_FUNC_TRACE();
2188 if (reta_size < FM10K_MAX_RSS_INDICES) {
2189 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2190 "(%d) doesn't match the number hardware can supported "
2191 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2196 * Read Redirection Table RETA[n], n=0..31. The redirection table has
2197 * 128 entries in 32 registers
2199 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2200 idx = i / RTE_RETA_GROUP_SIZE;
2201 shift = i % RTE_RETA_GROUP_SIZE;
2202 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2203 BIT_MASK_PER_UINT32);
2207 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2208 for (j = 0; j < CHARS_PER_UINT32; j++) {
2209 if (mask & (0x1 << j))
2210 reta_conf[idx].reta[shift + j] = ((reta >>
2211 CHAR_BIT * j) & UINT8_MAX);
2219 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2220 struct rte_eth_rss_conf *rss_conf)
2222 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2223 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2225 uint64_t hf = rss_conf->rss_hf;
2228 PMD_INIT_FUNC_TRACE();
2230 if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2231 FM10K_RSSRK_ENTRIES_PER_REG))
2232 return -EINVAL;
2238 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
2239 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
2240 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
2241 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
2242 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
2243 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
2244 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
2245 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
2246 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
2248 /* If none of the requested flags match a supported hash function, return */
2249 if (mrqc == 0)
2250 return -EINVAL;
2253 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2254 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2256 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
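/*
 * A hedged application-side sketch of driving the update above: enable
 * IPv4/IPv6 RSS with a 40-byte key (FM10K_RSSRK_SIZE registers of
 * FM10K_RSSRK_ENTRIES_PER_REG bytes each). The key contents and port id
 * are illustrative assumptions, not values mandated by this driver.
 */
static __rte_unused int
example_rss_hash_update(uint16_t port_id, uint8_t *key)
{
	struct rte_eth_rss_conf conf = {
		.rss_key = key,
		.rss_key_len = FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG,
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
	};

	return rte_eth_dev_rss_hash_update(port_id, &conf);
}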
2262 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2263 struct rte_eth_rss_conf *rss_conf)
2265 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2266 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2271 PMD_INIT_FUNC_TRACE();
2273 if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2274 FM10K_RSSRK_ENTRIES_PER_REG))
2275 return -EINVAL;
2278 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2279 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2281 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2283 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
2284 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
2285 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
2286 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
2287 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
2288 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
2289 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
2290 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
2291 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
2293 rss_conf->rss_hf = hf;
2299 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2301 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2302 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2304 /* Bind all local non-queue interrupts to vector 0 */
2305 int_map |= FM10K_MISC_VEC_ID;
2307 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2308 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2309 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2310 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2311 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2312 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2314 /* Enable misc causes */
2315 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2316 FM10K_EIMR_ENABLE(THI_FAULT) |
2317 FM10K_EIMR_ENABLE(FUM_FAULT) |
2318 FM10K_EIMR_ENABLE(MAILBOX) |
2319 FM10K_EIMR_ENABLE(SWITCHREADY) |
2320 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2321 FM10K_EIMR_ENABLE(SRAMERROR) |
2322 FM10K_EIMR_ENABLE(VFLR));
2325 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2326 FM10K_ITR_MASK_CLEAR);
2327 FM10K_WRITE_FLUSH(hw);
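/*
 * Note (an interpretation based on the flag names): FM10K_ITR_AUTOMASK asks
 * the hardware to mask the vector automatically when it fires, and
 * FM10K_ITR_MASK_CLEAR unmasks it so the next event can be delivered; the
 * interrupt handlers further below re-arm vector 0 with the same pair after
 * servicing an event.
 */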
2331 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2333 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2334 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2336 int_map |= FM10K_MISC_VEC_ID;
2338 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2339 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2340 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2341 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2342 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2343 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2345 /* Disable misc causes */
2346 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2347 FM10K_EIMR_DISABLE(THI_FAULT) |
2348 FM10K_EIMR_DISABLE(FUM_FAULT) |
2349 FM10K_EIMR_DISABLE(MAILBOX) |
2350 FM10K_EIMR_DISABLE(SWITCHREADY) |
2351 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2352 FM10K_EIMR_DISABLE(SRAMERROR) |
2353 FM10K_EIMR_DISABLE(VFLR));
2356 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2357 FM10K_WRITE_FLUSH(hw);
2361 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2363 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2364 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2366 /* Bind all local non-queue interrupts to vector 0 */
2367 int_map |= FM10K_MISC_VEC_ID;
2369 /* Only INT 0 is available; the other 15 are reserved. */
2370 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2373 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2374 FM10K_ITR_MASK_CLEAR);
2375 FM10K_WRITE_FLUSH(hw);
2379 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2381 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2382 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2384 int_map |= FM10K_MISC_VEC_ID;
2386 /* Only INT 0 is available; the other 15 are reserved. */
2387 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2390 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2391 FM10K_WRITE_FLUSH(hw);
2395 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2397 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2398 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2401 if (hw->mac.type == fm10k_mac_pf)
2402 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2403 FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2405 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2406 FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2407 rte_intr_enable(&pdev->intr_handle);
2412 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2414 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2415 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2418 if (hw->mac.type == fm10k_mac_pf)
2419 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2420 FM10K_ITR_MASK_SET);
2422 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2423 FM10K_ITR_MASK_SET);
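/*
 * Illustrative application flow for the two callbacks above (a sketch,
 * assuming the port was configured with intr_conf.rxq = 1; the port and
 * queue numbers are hypothetical):
 */
static __rte_unused void
example_rxq_intr_usage(uint16_t port_id, uint16_t queue_id)
{
	/* arm the queue interrupt before sleeping on the event fd */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* ... epoll-wait for the event elsewhere ... */
	/* mask it again while the ring is polled in busy mode */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}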
2428 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2430 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2431 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2432 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2433 uint32_t intr_vector, vec;
2437 /* fm10k needs one separate interrupt for the mailbox,
2438 * so only drivers that support multiple interrupt vectors,
2439 * e.g. vfio-pci, can work in fm10k interrupt mode
2441 if (!rte_intr_cap_multiple(intr_handle) ||
2442 dev->data->dev_conf.intr_conf.rxq == 0)
2445 intr_vector = dev->data->nb_rx_queues;
2447 /* disable interrupt first */
2448 rte_intr_disable(intr_handle);
2449 if (hw->mac.type == fm10k_mac_pf)
2450 fm10k_dev_disable_intr_pf(dev);
2452 fm10k_dev_disable_intr_vf(dev);
2454 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2455 PMD_INIT_LOG(ERR, "Failed to init event fd");
2459 if (rte_intr_dp_is_en(intr_handle) && !result) {
2460 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2461 dev->data->nb_rx_queues * sizeof(int), 0);
2462 if (intr_handle->intr_vec) {
2463 for (queue_id = 0, vec = FM10K_RX_VEC_START;
2464 queue_id < dev->data->nb_rx_queues;
2466 intr_handle->intr_vec[queue_id] = vec;
2467 if (vec < intr_handle->nb_efd - 1
2468 + FM10K_RX_VEC_START)
2469 vec++;
2472 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2473 " intr_vec", dev->data->nb_rx_queues);
2474 rte_intr_efd_disable(intr_handle);
2479 if (hw->mac.type == fm10k_mac_pf)
2480 fm10k_dev_enable_intr_pf(dev);
2482 fm10k_dev_enable_intr_vf(dev);
2483 rte_intr_enable(intr_handle);
2484 hw->mac.ops.update_int_moderator(hw);
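/*
 * Layout note for the mapping built above: vector 0 (FM10K_MISC_VEC_ID) is
 * reserved for the mailbox and other misc causes, so Rx queues are assigned
 * vectors starting at FM10K_RX_VEC_START; once the available event fds are
 * exhausted, the remaining queues all share the last vector.
 */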
2489 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2491 struct fm10k_fault fault;
2493 const char *estr = "Unknown error";
2495 /* Process PCA fault */
2496 if (eicr & FM10K_EICR_PCA_FAULT) {
2497 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2500 switch (fault.type) {
2502 estr = "PCA_NO_FAULT"; break;
2503 case PCA_UNMAPPED_ADDR:
2504 estr = "PCA_UNMAPPED_ADDR"; break;
2505 case PCA_BAD_QACCESS_PF:
2506 estr = "PCA_BAD_QACCESS_PF"; break;
2507 case PCA_BAD_QACCESS_VF:
2508 estr = "PCA_BAD_QACCESS_VF"; break;
2509 case PCA_MALICIOUS_REQ:
2510 estr = "PCA_MALICIOUS_REQ"; break;
2511 case PCA_POISONED_TLP:
2512 estr = "PCA_POISONED_TLP"; break;
2514 estr = "PCA_TLP_ABORT"; break;
2518 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2519 estr, fault.func ? "VF" : "PF", fault.func,
2520 fault.address, fault.specinfo);
2523 /* Process THI fault */
2524 if (eicr & FM10K_EICR_THI_FAULT) {
2525 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2528 switch (fault.type) {
2530 estr = "THI_NO_FAULT"; break;
2531 case THI_MAL_DIS_Q_FAULT:
2532 estr = "THI_MAL_DIS_Q_FAULT"; break;
2536 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2537 estr, fault.func ? "VF" : "PF", fault.func,
2538 fault.address, fault.specinfo);
2541 /* Process FUM fault */
2542 if (eicr & FM10K_EICR_FUM_FAULT) {
2543 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2546 switch (fault.type) {
2548 estr = "FUM_NO_FAULT"; break;
2549 case FUM_UNMAPPED_ADDR:
2550 estr = "FUM_UNMAPPED_ADDR"; break;
2551 case FUM_POISONED_TLP:
2552 estr = "FUM_POISONED_TLP"; break;
2553 case FUM_BAD_VF_QACCESS:
2554 estr = "FUM_BAD_VF_QACCESS"; break;
2555 case FUM_ADD_DECODE_ERR:
2556 estr = "FUM_ADD_DECODE_ERR"; break;
2558 estr = "FUM_RO_ERROR"; break;
2559 case FUM_QPRC_CRC_ERROR:
2560 estr = "FUM_QPRC_CRC_ERROR"; break;
2561 case FUM_CSR_TIMEOUT:
2562 estr = "FUM_CSR_TIMEOUT"; break;
2563 case FUM_INVALID_TYPE:
2564 estr = "FUM_INVALID_TYPE"; break;
2565 case FUM_INVALID_LENGTH:
2566 estr = "FUM_INVALID_LENGTH"; break;
2567 case FUM_INVALID_BE:
2568 estr = "FUM_INVALID_BE"; break;
2569 case FUM_INVALID_ALIGN:
2570 estr = "FUM_INVALID_ALIGN"; break;
2574 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2575 estr, fault.func ? "VF" : "PF", fault.func,
2576 fault.address, fault.specinfo);
2581 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2586 * PF interrupt handler triggered by NIC for handling specific interrupt.
2589 * Pointer to interrupt handle.
2591 * The address of the parameter (struct rte_eth_dev *) registered before.
2597 fm10k_dev_interrupt_handler_pf(void *param)
2599 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2600 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2601 uint32_t cause, status;
2602 struct fm10k_dev_info *dev_info =
2603 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2607 if (hw->mac.type != fm10k_mac_pf)
2610 cause = FM10K_READ_REG(hw, FM10K_EICR);
2612 /* Handle PCI fault cases */
2613 if (cause & FM10K_EICR_FAULT_MASK) {
2614 PMD_INIT_LOG(ERR, "INT: find fault!");
2615 fm10k_dev_handle_fault(hw, cause);
2618 /* Handle switch up/down */
2619 if (cause & FM10K_EICR_SWITCHNOTREADY)
2620 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2622 if (cause & FM10K_EICR_SWITCHREADY) {
2623 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2624 if (dev_info->sm_down == 1) {
2627 /* For recreating logical ports */
2628 status_mbx = hw->mac.ops.update_lport_state(hw,
2629 hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2630 if (status_mbx == FM10K_SUCCESS)
2632 "INT: Recreated Logical port");
2635 "INT: Logical ports weren't recreated");
2637 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2638 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2639 if (status_mbx != FM10K_SUCCESS)
2640 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2642 fm10k_mbx_unlock(hw);
2644 /* first clear the internal SW recording structure */
2645 if (!(dev->data->dev_conf.rxmode.mq_mode &
2646 ETH_MQ_RX_VMDQ_FLAG))
2647 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2650 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2651 MAIN_VSI_POOL_NUMBER);
2654 * Add the default MAC address and VLAN for the logical
2655 * ports that have been created; it is left to the
2656 * application to fully recover Rx filtering.
2658 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2659 MAIN_VSI_POOL_NUMBER);
2661 if (!(dev->data->dev_conf.rxmode.mq_mode &
2662 ETH_MQ_RX_VMDQ_FLAG))
2663 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2666 dev_info->sm_down = 0;
2667 _rte_eth_dev_callback_process(dev,
2668 RTE_ETH_EVENT_INTR_LSC,
2673 /* Handle mailbox message */
2675 err = hw->mbx.ops.process(hw, &hw->mbx);
2676 fm10k_mbx_unlock(hw);
2678 if (err == FM10K_ERR_RESET_REQUESTED) {
2679 PMD_INIT_LOG(INFO, "INT: Switch is down");
2680 dev_info->sm_down = 1;
2681 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2685 /* Handle SRAM error */
2686 if (cause & FM10K_EICR_SRAMERROR) {
2687 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2689 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2690 /* Write to clear pending bits */
2691 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2693 /* TODO: print an error message once the shared code is updated */
2696 /* Clear these 3 events, if set */
2697 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2698 FM10K_EICR_SWITCHREADY;
2700 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2702 /* Re-enable interrupt from device side */
2703 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2704 FM10K_ITR_MASK_CLEAR);
2705 /* Re-enable interrupt from host side */
2706 rte_intr_enable(dev->intr_handle);
2710 * VF interrupt handler triggered by NIC for handling specific interrupt.
2713 * Pointer to interrupt handle.
2715 * The address of the parameter (struct rte_eth_dev *) registered before.
2721 fm10k_dev_interrupt_handler_vf(void *param)
2723 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2724 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2725 struct fm10k_mbx_info *mbx = &hw->mbx;
2726 struct fm10k_dev_info *dev_info =
2727 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2728 const enum fm10k_mbx_state state = mbx->state;
2731 if (hw->mac.type != fm10k_mac_vf)
2734 /* Handle mailbox message if lock is acquired */
2736 hw->mbx.ops.process(hw, &hw->mbx);
2737 fm10k_mbx_unlock(hw);
2739 if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2740 PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2743 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2745 fm10k_mbx_unlock(hw);
2747 /* Setting reset flag */
2748 dev_info->sm_down = 1;
2749 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2753 if (dev_info->sm_down == 1 &&
2754 hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2755 PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2757 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2758 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2759 if (status_mbx != FM10K_SUCCESS)
2760 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2761 fm10k_mbx_unlock(hw);
2763 /* first clear the internal SW recording structure */
2764 fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2765 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2766 MAIN_VSI_POOL_NUMBER);
2769 * Add the default MAC address and VLAN for the logical ports that
2770 * have been created; it is left to the application to fully recover
2771 * Rx filtering.
2773 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2774 MAIN_VSI_POOL_NUMBER);
2775 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2777 dev_info->sm_down = 0;
2778 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2782 /* Re-enable interrupt from device side */
2783 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2784 FM10K_ITR_MASK_CLEAR);
2785 /* Re-enable interrupt from host side */
2786 rte_intr_enable(dev->intr_handle);
2789 /* Mailbox message handlers for the VF */
2790 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2791 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2792 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2793 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2794 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2798 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2802 /* Initialize mailbox lock */
2803 fm10k_mbx_initlock(hw);
2805 /* Replace the default message handlers with new ones */
2806 if (hw->mac.type == fm10k_mac_vf)
2807 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2810 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
2814 /* Connect to SM for PF device or PF for VF device */
2815 return hw->mbx.ops.connect(hw, &hw->mbx);
2819 fm10k_close_mbx_service(struct fm10k_hw *hw)
2821 /* Disconnect from SM for PF device or PF for VF device */
2822 hw->mbx.ops.disconnect(hw, &hw->mbx);
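/*
 * A minimal sketch of the locking convention used around mailbox traffic in
 * this file: any ops call that can touch the mailbox FIFO is wrapped in the
 * fm10k_mbx_lock()/fm10k_mbx_unlock() pair (the unlock side is visible in
 * the interrupt handlers above) so it cannot race with those handlers.
 */
static __rte_unused void
example_mbx_locked_process(struct fm10k_hw *hw)
{
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);
}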
2825 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2826 .dev_configure = fm10k_dev_configure,
2827 .dev_start = fm10k_dev_start,
2828 .dev_stop = fm10k_dev_stop,
2829 .dev_close = fm10k_dev_close,
2830 .promiscuous_enable = fm10k_dev_promiscuous_enable,
2831 .promiscuous_disable = fm10k_dev_promiscuous_disable,
2832 .allmulticast_enable = fm10k_dev_allmulticast_enable,
2833 .allmulticast_disable = fm10k_dev_allmulticast_disable,
2834 .stats_get = fm10k_stats_get,
2835 .xstats_get = fm10k_xstats_get,
2836 .xstats_get_names = fm10k_xstats_get_names,
2837 .stats_reset = fm10k_stats_reset,
2838 .xstats_reset = fm10k_stats_reset,
2839 .link_update = fm10k_link_update,
2840 .dev_infos_get = fm10k_dev_infos_get,
2841 .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2842 .vlan_filter_set = fm10k_vlan_filter_set,
2843 .vlan_offload_set = fm10k_vlan_offload_set,
2844 .mac_addr_add = fm10k_macaddr_add,
2845 .mac_addr_remove = fm10k_macaddr_remove,
2846 .rx_queue_start = fm10k_dev_rx_queue_start,
2847 .rx_queue_stop = fm10k_dev_rx_queue_stop,
2848 .tx_queue_start = fm10k_dev_tx_queue_start,
2849 .tx_queue_stop = fm10k_dev_tx_queue_stop,
2850 .rx_queue_setup = fm10k_rx_queue_setup,
2851 .rx_queue_release = fm10k_rx_queue_release,
2852 .tx_queue_setup = fm10k_tx_queue_setup,
2853 .tx_queue_release = fm10k_tx_queue_release,
2854 .rx_descriptor_done = fm10k_dev_rx_descriptor_done,
2855 .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
2856 .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
2857 .reta_update = fm10k_reta_update,
2858 .reta_query = fm10k_reta_query,
2859 .rss_hash_update = fm10k_rss_hash_update,
2860 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
2863 static int ftag_check_handler(__rte_unused const char *key,
2864 const char *value, __rte_unused void *opaque)
2866 if (strcmp(value, "1"))
2873 fm10k_check_ftag(struct rte_devargs *devargs)
2875 struct rte_kvargs *kvlist;
2876 const char *ftag_key = "enable_ftag";
2878 if (devargs == NULL)
2881 kvlist = rte_kvargs_parse(devargs->args, NULL);
2885 if (!rte_kvargs_count(kvlist, ftag_key)) {
2886 rte_kvargs_free(kvlist);
2889 /* FTAG is enabled when there's key-value pair: enable_ftag=1 */
2890 if (rte_kvargs_process(kvlist, ftag_key,
2891 ftag_check_handler, NULL) < 0) {
2892 rte_kvargs_free(kvlist);
2895 rte_kvargs_free(kvlist);
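/*
 * Usage note: FTAG mode is requested through device arguments on the
 * application command line, for example (the PCI address is hypothetical):
 *     -w 0000:84:00.0,enable_ftag=1
 */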
2901 fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
2905 struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
2910 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
2911 ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
2922 static void __attribute__((cold))
2923 fm10k_set_tx_function(struct rte_eth_dev *dev)
2925 struct fm10k_tx_queue *txq;
2928 uint16_t tx_ftag_en = 0;
2930 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2931 /* primary process has set the ftag flag and txq_flags */
2932 txq = dev->data->tx_queues[0];
2933 if (fm10k_tx_vec_condition_check(txq)) {
2934 dev->tx_pkt_burst = fm10k_xmit_pkts;
2935 dev->tx_pkt_prepare = fm10k_prep_pkts;
2936 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2938 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2939 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2940 dev->tx_pkt_prepare = NULL;
2945 if (fm10k_check_ftag(dev->device->devargs))
2948 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2949 txq = dev->data->tx_queues[i];
2950 txq->tx_ftag_en = tx_ftag_en;
2951 /* Check whether the vector Tx path can be used */
2952 if (fm10k_tx_vec_condition_check(txq))
2957 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2958 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2959 txq = dev->data->tx_queues[i];
2960 fm10k_txq_vec_setup(txq);
2962 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2963 dev->tx_pkt_prepare = NULL;
2965 dev->tx_pkt_burst = fm10k_xmit_pkts;
2966 dev->tx_pkt_prepare = fm10k_prep_pkts;
2967 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
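/*
 * Selection summary for the logic above: the vector Tx path is used only
 * when every queue passes fm10k_tx_vec_condition_check(); otherwise the
 * driver falls back to the scalar path, which is also the only one that
 * installs fm10k_prep_pkts() as the Tx prepare callback.
 */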
2971 static void __attribute__((cold))
2972 fm10k_set_rx_function(struct rte_eth_dev *dev)
2974 struct fm10k_dev_info *dev_info =
2975 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2976 uint16_t i, rx_using_sse;
2977 uint16_t rx_ftag_en = 0;
2979 if (fm10k_check_ftag(dev->device->devargs))
2982 /* In order to allow Vector Rx there are a few configuration
2983 * conditions to be met.
2985 if (!fm10k_rx_vec_condition_check(dev) &&
2986 dev_info->rx_vec_allowed && !rx_ftag_en) {
2987 if (dev->data->scattered_rx)
2988 dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2990 dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2991 } else if (dev->data->scattered_rx)
2992 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2994 dev->rx_pkt_burst = fm10k_recv_pkts;
2997 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2998 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
3001 PMD_INIT_LOG(DEBUG, "Use vector Rx func");
3003 PMD_INIT_LOG(DEBUG, "Use regular Rx func");
3005 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3008 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3009 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
3011 rxq->rx_using_sse = rx_using_sse;
3012 rxq->rx_ftag_en = rx_ftag_en;
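/*
 * Note: rx_using_sse and rx_ftag_en are propagated into each queue so that
 * per-queue Rx descriptor handling matches the burst function chosen above;
 * secondary processes return before this loop and simply adopt the
 * primary's choice of burst function.
 */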
3017 fm10k_params_init(struct rte_eth_dev *dev)
3019 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3020 struct fm10k_dev_info *info =
3021 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
3023 /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
3024 * there is no way to get link status without reading BAR4. Until this
3025 * works, assume we have maximum bandwidth.
3026 * @todo - fix bus info
3028 hw->bus_caps.speed = fm10k_bus_speed_8000;
3029 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
3030 hw->bus_caps.payload = fm10k_bus_payload_512;
3031 hw->bus.speed = fm10k_bus_speed_8000;
3032 hw->bus.width = fm10k_bus_width_pcie_x8;
3033 hw->bus.payload = fm10k_bus_payload_256;
3035 info->rx_vec_allowed = true;
3039 eth_fm10k_dev_init(struct rte_eth_dev *dev)
3041 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3042 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3043 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3045 struct fm10k_macvlan_filter_info *macvlan;
3047 PMD_INIT_FUNC_TRACE();
3049 dev->dev_ops = &fm10k_eth_dev_ops;
3050 dev->rx_pkt_burst = &fm10k_recv_pkts;
3051 dev->tx_pkt_burst = &fm10k_xmit_pkts;
3052 dev->tx_pkt_prepare = &fm10k_prep_pkts;
3055 * The primary process does the whole initialization; secondary
3056 * processes just select the same Rx and Tx functions as the primary.
3058 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3059 fm10k_set_rx_function(dev);
3060 fm10k_set_tx_function(dev);
3064 rte_eth_copy_pci_info(dev, pdev);
3066 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
3067 memset(macvlan, 0, sizeof(*macvlan));
3068 /* Vendor and Device ID need to be set before init of shared code */
3069 memset(hw, 0, sizeof(*hw));
3070 hw->device_id = pdev->id.device_id;
3071 hw->vendor_id = pdev->id.vendor_id;
3072 hw->subsystem_device_id = pdev->id.subsystem_device_id;
3073 hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
3074 hw->revision_id = 0;
3075 hw->hw_addr = (void *)pdev->mem_resource[0].addr;
3076 if (hw->hw_addr == NULL) {
3077 PMD_INIT_LOG(ERR, "Bad mem resource."
3078 " Try to blacklist unused devices.");
3082 /* Store fm10k_adapter pointer */
3083 hw->back = dev->data->dev_private;
3085 /* Initialize the shared code */
3086 diag = fm10k_init_shared_code(hw);
3087 if (diag != FM10K_SUCCESS) {
3088 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
3092 /* Initialize parameters */
3093 fm10k_params_init(dev);
3095 /* Initialize the hw */
3096 diag = fm10k_init_hw(hw);
3097 if (diag != FM10K_SUCCESS) {
3098 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
3102 /* Initialize MAC address(es) */
3103 dev->data->mac_addrs = rte_zmalloc("fm10k",
3104 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
3105 if (dev->data->mac_addrs == NULL) {
3106 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
3110 diag = fm10k_read_mac_addr(hw);
3112 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3113 &dev->data->mac_addrs[0]);
3115 if (diag != FM10K_SUCCESS ||
3116 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
3118 /* Generate a random addr */
3119 eth_random_addr(hw->mac.addr);
3120 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
3121 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3122 &dev->data->mac_addrs[0]);
3125 /* Reset the hw statistics */
3126 fm10k_stats_reset(dev);
3129 diag = fm10k_reset_hw(hw);
3130 if (diag != FM10K_SUCCESS) {
3131 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
3135 /* Setup mailbox service */
3136 diag = fm10k_setup_mbx_service(hw);
3137 if (diag != FM10K_SUCCESS) {
3138 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
3142 /* PF and VF have different interrupt handling mechanisms */
3143 if (hw->mac.type == fm10k_mac_pf) {
3144 /* register callback func to eal lib */
3145 rte_intr_callback_register(intr_handle,
3146 fm10k_dev_interrupt_handler_pf, (void *)dev);
3148 /* enable MISC interrupt */
3149 fm10k_dev_enable_intr_pf(dev);
3151 rte_intr_callback_register(intr_handle,
3152 fm10k_dev_interrupt_handler_vf, (void *)dev);
3154 fm10k_dev_enable_intr_vf(dev);
3157 /* Enable intr after callback registered */
3158 rte_intr_enable(intr_handle);
3160 hw->mac.ops.update_int_moderator(hw);
3162 /* Make sure Switch Manager is ready before going forward. */
3163 if (hw->mac.type == fm10k_mac_pf) {
3164 int switch_ready = 0;
3166 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3168 hw->mac.ops.get_host_state(hw, &switch_ready);
3169 fm10k_mbx_unlock(hw);
3172 /* Delay some time to acquire async LPORT_MAP info. */
3173 rte_delay_us(WAIT_SWITCH_MSG_US);
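/*
 * Note on the bound above: the loop polls at most
 * MAX_QUERY_SWITCH_STATE_TIMES times with WAIT_SWITCH_MSG_US between
 * tries, i.e. roughly 10 * 100 ms = 1 s before giving up on the
 * switch manager.
 */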
3176 if (switch_ready == 0) {
3177 PMD_INIT_LOG(ERR, "switch is not ready");
3183 * The calls below trigger operations on the mailbox; acquire the lock to
3184 * avoid racing with the interrupt handler. Operations on the mailbox
3185 * FIFO raise an interrupt to the PF/SM, whose handler processes the
3186 * message and raises an interrupt back to our side, at which point the
3187 * mailbox FIFO is touched again.
3190 /* Enable port first */
3191 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
3194 /* Set unicast mode by default. The application can switch to another
3195 * mode via other API functions.
3196 */
3197 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
3198 FM10K_XCAST_MODE_NONE);
3200 fm10k_mbx_unlock(hw);
3202 /* Make sure default VID is ready before going forward. */
3203 if (hw->mac.type == fm10k_mac_pf) {
3204 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3205 if (hw->mac.default_vid)
3207 /* Delay some time to acquire async port VLAN info. */
3208 rte_delay_us(WAIT_SWITCH_MSG_US);
3211 if (!hw->mac.default_vid) {
3212 PMD_INIT_LOG(ERR, "default VID is not ready");
3217 /* Add default mac address */
3218 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
3219 MAIN_VSI_POOL_NUMBER);
3225 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
3227 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3228 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3229 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3230 PMD_INIT_FUNC_TRACE();
3232 /* only uninitialize in the primary process */
3233 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3236 /* safe to close dev here */
3237 fm10k_dev_close(dev);
3239 dev->dev_ops = NULL;
3240 dev->rx_pkt_burst = NULL;
3241 dev->tx_pkt_burst = NULL;
3243 /* disable uio/vfio intr */
3244 rte_intr_disable(intr_handle);
3246 /* PF and VF have different interrupt handling mechanisms */
3247 if (hw->mac.type == fm10k_mac_pf) {
3248 /* disable interrupt */
3249 fm10k_dev_disable_intr_pf(dev);
3251 /* unregister callback func to eal lib */
3252 rte_intr_callback_unregister(intr_handle,
3253 fm10k_dev_interrupt_handler_pf, (void *)dev);
3255 /* disable interrupt */
3256 fm10k_dev_disable_intr_vf(dev);
3258 rte_intr_callback_unregister(intr_handle,
3259 fm10k_dev_interrupt_handler_vf, (void *)dev);
3262 /* free mac memory */
3263 if (dev->data->mac_addrs) {
3264 rte_free(dev->data->mac_addrs);
3265 dev->data->mac_addrs = NULL;
3268 memset(hw, 0, sizeof(*hw));
3273 static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3274 struct rte_pci_device *pci_dev)
3276 return rte_eth_dev_pci_generic_probe(pci_dev,
3277 sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
3280 static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
3282 return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
3286 * The set of PCI devices this driver supports. This driver will enable both PF
3287 * and SRIOV-VF devices.
3289 static const struct rte_pci_id pci_id_fm10k_map[] = {
3290 { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
3291 { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
3292 { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
3293 { .vendor_id = 0, /* sentinel */ },
3296 static struct rte_pci_driver rte_pmd_fm10k = {
3297 .id_table = pci_id_fm10k_map,
3298 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3299 RTE_PCI_DRV_IOVA_AS_VA,
3300 .probe = eth_fm10k_pci_probe,
3301 .remove = eth_fm10k_pci_remove,
3304 RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
3305 RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
3306 RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
3308 RTE_INIT(fm10k_init_log);
3309 static void
3310 fm10k_init_log(void)
3312 fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
3313 if (fm10k_logtype_init >= 0)
3314 rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
3315 fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
3316 if (fm10k_logtype_driver >= 0)
3317 rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);
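/*
 * Usage note: the two log types registered above can be raised from the EAL
 * command line; the exact syntax depends on the DPDK release, e.g. an
 * illustrative invocation:
 *     --log-level=pmd.net.fm10k.driver:8
 */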