1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2013-2016 Intel Corporation
5 #include <rte_ethdev.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_malloc.h>
8 #include <rte_memzone.h>
9 #include <rte_string_fns.h>
11 #include <rte_spinlock.h>
12 #include <rte_kvargs.h>
15 #include "base/fm10k_api.h"
17 /* Default delay to acquire mailbox lock */
18 #define FM10K_MBXLOCK_DELAY_US 20
19 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
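/* e.g. (addr & UINT64_LOWER_32BITS_MASK) and (addr >> 32) split a
 * 64-bit ring base address across the 32-bit TDBAL/TDBAH and
 * RDBAL/RDBAH register pairs
 */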
21 #define MAIN_VSI_POOL_NUMBER 0
/* Maximum number of attempts to query the switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval between switch status queries */
#define WAIT_SWITCH_MSG_US 100000
/* Quiescence period for the switch */
28 #define FM10K_SWITCH_QUIESCE_US 10000
/* Number of bytes per uint32 type */
30 #define CHARS_PER_UINT32 (sizeof(uint32_t))
31 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
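/* sizeof(uint32_t) == 4, so BIT_MASK_PER_UINT32 == (1 << 4) - 1 == 0xF:
 * one mask bit for each byte-sized RETA entry packed into a register
 */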
33 /* default 1:1 map from queue ID to interrupt vector ID */
34 #define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
/* First 64 logical ports for PF/VMDQ, second 64 for Flow Director */
37 #define MAX_LPORT_NUM 128
38 #define GLORT_FD_Q_BASE 0x40
39 #define GLORT_PF_MASK 0xFFC0
40 #define GLORT_FD_MASK GLORT_PF_MASK
41 #define GLORT_FD_INDEX GLORT_FD_Q_BASE
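/* With GLORT_PF_MASK == 0xFFC0 the low 6 bits select a logical port,
 * so a mapped glort base covers 64 PF/VMDQ ports (0x00-0x3F) and the
 * Flow Director range starts at GLORT_FD_Q_BASE (0x40-0x7F).
 */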
43 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
44 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
45 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
46 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
47 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
48 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
50 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
51 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
52 const u8 *mac, bool add, uint32_t pool);
53 static void fm10k_tx_queue_release(void *queue);
54 static void fm10k_rx_queue_release(void *queue);
55 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
56 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
57 static int fm10k_check_ftag(struct rte_devargs *devargs);
58 static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
60 struct fm10k_xstats_name_off {
61 char name[RTE_ETH_XSTATS_NAME_SIZE];
65 struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
66 {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
67 {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
68 {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
69 {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
70 {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
71 {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
72 {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
73 {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
77 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
78 sizeof(fm10k_hw_stats_strings[0]))
80 struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
81 {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
82 {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
83 {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
86 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
87 sizeof(fm10k_hw_stats_rx_q_strings[0]))
89 struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
90 {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
91 {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
94 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
95 sizeof(fm10k_hw_stats_tx_q_strings[0]))
97 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
98 (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
100 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
103 fm10k_mbx_initlock(struct fm10k_hw *hw)
105 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
109 fm10k_mbx_lock(struct fm10k_hw *hw)
111 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
112 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
116 fm10k_mbx_unlock(struct fm10k_hw *hw)
118 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
121 /* Stubs needed for linkage when vPMD is disabled */
122 int __attribute__((weak))
123 fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
128 uint16_t __attribute__((weak))
130 __rte_unused void *rx_queue,
131 __rte_unused struct rte_mbuf **rx_pkts,
132 __rte_unused uint16_t nb_pkts)
137 uint16_t __attribute__((weak))
138 fm10k_recv_scattered_pkts_vec(
139 __rte_unused void *rx_queue,
140 __rte_unused struct rte_mbuf **rx_pkts,
141 __rte_unused uint16_t nb_pkts)
146 int __attribute__((weak))
147 fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
153 void __attribute__((weak))
154 fm10k_rx_queue_release_mbufs_vec(
155 __rte_unused struct fm10k_rx_queue *rxq)
160 void __attribute__((weak))
161 fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
166 int __attribute__((weak))
167 fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
172 uint16_t __attribute__((weak))
173 fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
174 __rte_unused struct rte_mbuf **tx_pkts,
175 __rte_unused uint16_t nb_pkts)
181 * reset queue to initial state, allocate software buffers used when starting
183 * return 0 on success
184 * return -ENOMEM if buffers cannot be allocated
185 * return -EINVAL if buffers do not satisfy alignment condition
188 rx_queue_reset(struct fm10k_rx_queue *q)
190 static const union fm10k_rx_desc zero = {{0} };
193 PMD_INIT_FUNC_TRACE();
195 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
199 for (i = 0; i < q->nb_desc; ++i) {
200 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
201 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
202 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
206 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
207 q->hw_ring[i].q.pkt_addr = dma_addr;
208 q->hw_ring[i].q.hdr_addr = dma_addr;
211 /* initialize extra software ring entries. Space for these extra
212 * entries is always allocated.
214 memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
215 for (i = 0; i < q->nb_fake_desc; ++i) {
216 q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
217 q->hw_ring[q->nb_desc + i] = zero;
222 q->next_trigger = q->alloc_thresh - 1;
223 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
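/* tail is left at nb_desc - 1 so hardware owns all but one descriptor,
 * the usual convention for telling a full ring from an empty one
 */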
224 q->rxrearm_start = 0;
231 * clean queue, descriptor rings, free software buffers used when stopping
235 rx_queue_clean(struct fm10k_rx_queue *q)
237 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
239 PMD_INIT_FUNC_TRACE();
241 /* zero descriptor rings */
242 for (i = 0; i < q->nb_desc; ++i)
243 q->hw_ring[i] = zero;
245 /* zero faked descriptors */
246 for (i = 0; i < q->nb_fake_desc; ++i)
247 q->hw_ring[q->nb_desc + i] = zero;
249 /* vPMD driver has a different way of releasing mbufs. */
250 if (q->rx_using_sse) {
251 fm10k_rx_queue_release_mbufs_vec(q);
255 /* free software buffers */
256 for (i = 0; i < q->nb_desc; ++i) {
258 rte_pktmbuf_free_seg(q->sw_ring[i]);
259 q->sw_ring[i] = NULL;
* free all queue memory used when releasing the queue (i.e. on reconfigure)
268 rx_queue_free(struct fm10k_rx_queue *q)
270 PMD_INIT_FUNC_TRACE();
272 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
275 rte_free(q->sw_ring);
* disable RX queue, wait until HW finishes the necessary flush operation
287 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
291 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
292 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
293 reg & ~FM10K_RXQCTL_ENABLE);
295 /* Wait 100us at most */
296 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
298 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
299 if (!(reg & FM10K_RXQCTL_ENABLE))
303 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
310 * reset queue to initial state, allocate software buffers used when starting
314 tx_queue_reset(struct fm10k_tx_queue *q)
316 PMD_INIT_FUNC_TRACE();
320 q->nb_free = q->nb_desc - 1;
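/* the RS tracker needs one slot per rs_thresh descriptors, so
 * (nb_desc + 1) / rs_thresh entries cover every RS bit that can be
 * outstanding at once
 */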
321 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
322 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
326 * clean queue, descriptor rings, free software buffers used when stopping
330 tx_queue_clean(struct fm10k_tx_queue *q)
332 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
334 PMD_INIT_FUNC_TRACE();
336 /* zero descriptor rings */
337 for (i = 0; i < q->nb_desc; ++i)
338 q->hw_ring[i] = zero;
340 /* free software buffers */
341 for (i = 0; i < q->nb_desc; ++i) {
343 rte_pktmbuf_free_seg(q->sw_ring[i]);
344 q->sw_ring[i] = NULL;
* free all queue memory used when releasing the queue (i.e. on reconfigure)
353 tx_queue_free(struct fm10k_tx_queue *q)
355 PMD_INIT_FUNC_TRACE();
357 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
359 if (q->rs_tracker.list) {
360 rte_free(q->rs_tracker.list);
361 q->rs_tracker.list = NULL;
364 rte_free(q->sw_ring);
* disable TX queue, wait until HW finishes the necessary flush operation
376 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
380 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
381 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
382 reg & ~FM10K_TXDCTL_ENABLE);
384 /* Wait 100us at most */
385 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
387 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
388 if (!(reg & FM10K_TXDCTL_ENABLE))
392 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
399 fm10k_check_mq_mode(struct rte_eth_dev *dev)
401 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
402 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
403 struct rte_eth_vmdq_rx_conf *vmdq_conf;
404 uint16_t nb_rx_q = dev->data->nb_rx_queues;
406 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
408 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
409 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
413 if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
416 if (hw->mac.type == fm10k_mac_vf) {
417 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
421 /* Check VMDQ queue pool number */
422 if (vmdq_conf->nb_queue_pools >
423 sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
424 vmdq_conf->nb_queue_pools > nb_rx_q) {
PMD_INIT_LOG(ERR, "Too many queue pools: %d",
426 vmdq_conf->nb_queue_pools);
433 static const struct fm10k_txq_ops def_txq_ops = {
434 .reset = tx_queue_reset,
438 fm10k_dev_configure(struct rte_eth_dev *dev)
442 PMD_INIT_FUNC_TRACE();
444 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
/* multiple queue mode checking */
447 ret = fm10k_check_mq_mode(dev);
449 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
457 /* fls = find last set bit = 32 minus the number of leading zeros */
459 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
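/* e.g. fls(0) = 0, fls(1) = 1, fls(8) = 4; for n >= 1, fls(n - 1) is
 * ceil(log2(n)), the number of bits needed to index n entries
 */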
463 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
465 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
466 struct rte_eth_vmdq_rx_conf *vmdq_conf;
469 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
471 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
472 if (!vmdq_conf->pool_map[i].pools)
475 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
476 fm10k_mbx_unlock(hw);
481 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
483 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
485 /* Add default mac address */
486 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
487 MAIN_VSI_POOL_NUMBER);
491 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
493 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
494 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
495 uint32_t mrqc, *key, i, reta, j;
498 #define RSS_KEY_SIZE 40
499 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
500 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
501 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
502 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
503 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
504 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
507 if (dev->data->nb_rx_queues == 1 ||
508 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
509 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
510 FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
/* random key is rss_intel_key (default) or user-provided (rss_key) */
515 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
516 key = (uint32_t *)rss_intel_key;
518 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
520 /* Now fill our hash function seeds, 4 bytes at a time */
521 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
522 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
525 * Fill in redirection table
526 * The byte-swap is needed because NIC registers are in
527 * little-endian order.
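* Each 32-bit RETA register packs four one-byte queue indices; the
* loop below shifts each index into the accumulator and writes one
* register for every four entries, byte-swapping so the entries land
* in little-endian byte order.
*/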
530 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
531 if (j == dev->data->nb_rx_queues)
533 reta = (reta << CHAR_BIT) | j;
535 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
540 * Generate RSS hash based on packet types, TCP/UDP
541 * port numbers and/or IPv4/v6 src and dst addresses
543 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
545 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
546 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
547 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
548 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
549 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
550 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
551 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
552 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
553 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
561 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
565 fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
567 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
570 for (i = 0; i < nb_lport_new; i++) {
/* Set unicast mode by default. The application can change
* it to another mode via other API functions.
575 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
576 FM10K_XCAST_MODE_NONE);
577 fm10k_mbx_unlock(hw);
582 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
584 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
585 struct rte_eth_vmdq_rx_conf *vmdq_conf;
586 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
587 struct fm10k_macvlan_filter_info *macvlan;
588 uint16_t nb_queue_pools = 0; /* pool number in configuration */
589 uint16_t nb_lport_new;
591 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
592 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
594 fm10k_dev_rss_configure(dev);
596 /* only PF supports VMDQ */
597 if (hw->mac.type != fm10k_mac_pf)
600 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
601 nb_queue_pools = vmdq_conf->nb_queue_pools;
603 /* no pool number change, no need to update logic port and VLAN/MAC */
604 if (macvlan->nb_queue_pools == nb_queue_pools)
607 nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
608 fm10k_dev_logic_port_update(dev, nb_lport_new);
610 /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
611 memset(dev->data->mac_addrs, 0,
612 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
613 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
614 &dev->data->mac_addrs[0]);
615 memset(macvlan, 0, sizeof(*macvlan));
616 macvlan->nb_queue_pools = nb_queue_pools;
619 fm10k_dev_vmdq_rx_configure(dev);
621 fm10k_dev_pf_main_vsi_reset(dev);
625 fm10k_dev_tx_init(struct rte_eth_dev *dev)
627 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
629 struct fm10k_tx_queue *txq;
633 /* Disable TXINT to avoid possible interrupt */
634 for (i = 0; i < hw->mac.max_queues; i++)
635 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
636 3 << FM10K_TXINT_TIMER_SHIFT);
639 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
640 txq = dev->data->tx_queues[i];
641 base_addr = txq->hw_ring_phys_addr;
642 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
644 /* disable queue to avoid issues while updating state */
645 ret = tx_queue_disable(hw, i);
647 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
650 /* Enable use of FTAG bit in TX descriptor, PFVTCTL
651 * register is read-only for VF.
653 if (fm10k_check_ftag(dev->device->devargs)) {
654 if (hw->mac.type == fm10k_mac_pf) {
655 FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
656 FM10K_PFVTCTL_FTAG_DESC_ENABLE);
657 PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
659 PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
664 /* set location and size for descriptor ring */
665 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
666 base_addr & UINT64_LOWER_32BITS_MASK);
667 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
668 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
669 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
671 /* assign default SGLORT for each TX queue by PF */
672 if (hw->mac.type == fm10k_mac_pf)
673 FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
676 /* set up vector or scalar TX function as appropriate */
677 fm10k_set_tx_function(dev);
683 fm10k_dev_rx_init(struct rte_eth_dev *dev)
685 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
686 struct fm10k_macvlan_filter_info *macvlan;
687 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
688 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
690 struct fm10k_rx_queue *rxq;
693 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
694 uint32_t logic_port = hw->mac.dglort_map;
696 uint16_t queue_stride = 0;
698 /* enable RXINT for interrupt mode */
700 if (rte_intr_dp_is_en(intr_handle)) {
701 for (; i < dev->data->nb_rx_queues; i++) {
702 FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
703 if (hw->mac.type == fm10k_mac_pf)
704 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
706 FM10K_ITR_MASK_CLEAR);
708 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
710 FM10K_ITR_MASK_CLEAR);
713 /* Disable other RXINT to avoid possible interrupt */
714 for (; i < hw->mac.max_queues; i++)
715 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
716 3 << FM10K_RXINT_TIMER_SHIFT);
718 /* Setup RX queues */
719 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
720 rxq = dev->data->rx_queues[i];
721 base_addr = rxq->hw_ring_phys_addr;
722 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
724 /* disable queue to avoid issues while updating state */
725 ret = rx_queue_disable(hw, i);
727 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
731 /* Setup the Base and Length of the Rx Descriptor Ring */
732 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
733 base_addr & UINT64_LOWER_32BITS_MASK);
734 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
735 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
736 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
738 /* Configure the Rx buffer size for one buff without split */
739 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
740 RTE_PKTMBUF_HEADROOM);
/* The RX buffer is aligned to 512B within the mbuf, so up to 511B
* may be reserved for that alignment. The SRRCTL register assumes
* all buffers have the same size, so assume the worst case and
* treat 512B as reserved. Otherwise HW could overwrite data
* into the next mbuf.
748 buf_size -= FM10K_RX_DATABUF_ALIGN;
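/* e.g. a 2048B data room with 128B of headroom leaves
 * 2048 - 128 - 512 = 1408B advertised to hardware
 */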
750 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
751 (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
752 FM10K_SRRCTL_LOOPBACK_SUPPRESS);
/* Add dual VLAN tag length to support double VLAN tagging */
755 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
756 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
757 dev->data->dev_conf.rxmode.enable_scatter) {
759 dev->data->scattered_rx = 1;
760 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
761 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
762 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
/* Enable drop on empty; it is read-only for VF */
766 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
767 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
769 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
770 FM10K_WRITE_FLUSH(hw);
773 /* Configure VMDQ/RSS if applicable */
774 fm10k_dev_mq_rx_configure(dev);
776 /* Decide the best RX function */
777 fm10k_set_rx_function(dev);
/* update RX_SGLORT for loopback suppression */
780 if (hw->mac.type != fm10k_mac_pf)
782 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
783 if (macvlan->nb_queue_pools)
784 queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
785 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
786 if (i && queue_stride && !(i % queue_stride))
788 FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
795 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
797 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
800 struct fm10k_rx_queue *rxq;
802 PMD_INIT_FUNC_TRACE();
804 if (rx_queue_id < dev->data->nb_rx_queues) {
805 rxq = dev->data->rx_queues[rx_queue_id];
806 err = rx_queue_reset(rxq);
807 if (err == -ENOMEM) {
808 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
810 } else if (err == -EINVAL) {
PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"
816 /* Setup the HW Rx Head and Tail Descriptor Pointers
817 * Note: this must be done AFTER the queue is enabled on real
818 * hardware, but BEFORE the queue is enabled when using the
819 * emulation platform. Do it in both places for now and remove
820 * this comment and the following two register writes when the
821 * emulation platform is no longer being used.
823 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
824 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
826 /* Set PF ownership flag for PF devices */
827 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
828 if (hw->mac.type == fm10k_mac_pf)
829 reg |= FM10K_RXQCTL_PF;
830 reg |= FM10K_RXQCTL_ENABLE;
831 /* enable RX queue */
832 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
833 FM10K_WRITE_FLUSH(hw);
835 /* Setup the HW Rx Head and Tail Descriptor Pointers
836 * Note: this must be done AFTER the queue is enabled
838 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
839 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
840 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
847 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
849 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
851 PMD_INIT_FUNC_TRACE();
853 if (rx_queue_id < dev->data->nb_rx_queues) {
854 /* Disable RX queue */
855 rx_queue_disable(hw, rx_queue_id);
857 /* Free mbuf and clean HW ring */
858 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
859 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
866 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
868 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
869 /** @todo - this should be defined in the shared code */
870 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
871 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
874 PMD_INIT_FUNC_TRACE();
876 if (tx_queue_id < dev->data->nb_tx_queues) {
877 struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
881 /* reset head and tail pointers */
882 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
883 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
885 /* enable TX queue */
886 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
887 FM10K_TXDCTL_ENABLE | txdctl);
888 FM10K_WRITE_FLUSH(hw);
889 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
897 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
899 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
901 PMD_INIT_FUNC_TRACE();
903 if (tx_queue_id < dev->data->nb_tx_queues) {
904 tx_queue_disable(hw, tx_queue_id);
905 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
906 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
912 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
914 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
915 != FM10K_DGLORTMAP_NONE);
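/* dglort_map reads as FM10K_DGLORTMAP_NONE until the switch manager
 * assigns a glort range, so the xcast helpers below can bail out
 * early on an unmapped PF.
 */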
919 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
921 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
924 PMD_INIT_FUNC_TRACE();
/* Return if a valid glort range has not been acquired */
927 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
931 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
932 FM10K_XCAST_MODE_PROMISC);
933 fm10k_mbx_unlock(hw);
935 if (status != FM10K_SUCCESS)
936 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
940 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
942 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
946 PMD_INIT_FUNC_TRACE();
/* Return if a valid glort range has not been acquired */
949 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
952 if (dev->data->all_multicast == 1)
953 mode = FM10K_XCAST_MODE_ALLMULTI;
955 mode = FM10K_XCAST_MODE_NONE;
958 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
960 fm10k_mbx_unlock(hw);
962 if (status != FM10K_SUCCESS)
963 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
967 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
969 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
972 PMD_INIT_FUNC_TRACE();
/* Return if a valid glort range has not been acquired */
975 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
978 /* If promiscuous mode is enabled, it doesn't make sense to enable
* allmulticast and disable promiscuous since fm10k can only select
982 if (dev->data->promiscuous) {
PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
"no need to enable allmulticast");
989 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
990 FM10K_XCAST_MODE_ALLMULTI);
991 fm10k_mbx_unlock(hw);
993 if (status != FM10K_SUCCESS)
994 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
998 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
1000 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1003 PMD_INIT_FUNC_TRACE();
/* Return if a valid glort range has not been acquired */
1006 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1009 if (dev->data->promiscuous) {
PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
"since promiscuous mode is enabled");
1016 /* Change mode to unicast mode */
1017 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1018 FM10K_XCAST_MODE_NONE);
1019 fm10k_mbx_unlock(hw);
1021 if (status != FM10K_SUCCESS)
1022 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
1026 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
1028 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1029 uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
1030 uint16_t nb_queue_pools;
1031 struct fm10k_macvlan_filter_info *macvlan;
1033 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1034 nb_queue_pools = macvlan->nb_queue_pools;
1035 pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
1036 rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
1038 /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
1039 dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
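/* e.g. with 4 VMDQ pools and 16 RX queues: pool_len = fls(3) = 2,
 * rss_len = fls(15) - 2 = 2, i.e. 2^2 RSS queues per pool
 */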
1040 dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1042 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1043 /* Configure VMDQ/RSS DGlort Decoder */
1044 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
/* Flow Director configuration; only the queue number is valid. */
1047 dglortdec = fls(dev->data->nb_rx_queues - 1);
1048 dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1049 (hw->mac.dglort_map + GLORT_FD_Q_BASE);
1050 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1051 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1053 /* Invalidate all other GLORT entries */
1054 for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1055 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1056 FM10K_DGLORTMAP_NONE);
1059 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
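/* adding this mask before shifting right by FM10K_SRRCTL_BSIZEPKT_SHIFT
 * rounds a byte count up to the next SRRCTL buffer-size unit
 */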
1061 fm10k_dev_start(struct rte_eth_dev *dev)
1063 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1066 PMD_INIT_FUNC_TRACE();
1068 /* stop, init, then start the hw */
1069 diag = fm10k_stop_hw(hw);
1070 if (diag != FM10K_SUCCESS) {
1071 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1075 diag = fm10k_init_hw(hw);
1076 if (diag != FM10K_SUCCESS) {
1077 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1081 diag = fm10k_start_hw(hw);
1082 if (diag != FM10K_SUCCESS) {
1083 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1087 diag = fm10k_dev_tx_init(dev);
1089 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1093 if (fm10k_dev_rxq_interrupt_setup(dev))
1096 diag = fm10k_dev_rx_init(dev);
1098 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1102 if (hw->mac.type == fm10k_mac_pf)
1103 fm10k_dev_dglort_map_configure(dev);
1105 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1106 struct fm10k_rx_queue *rxq;
1107 rxq = dev->data->rx_queues[i];
1109 if (rxq->rx_deferred_start)
1111 diag = fm10k_dev_rx_queue_start(dev, i);
1114 for (j = 0; j < i; ++j)
1115 rx_queue_clean(dev->data->rx_queues[j]);
1120 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1121 struct fm10k_tx_queue *txq;
1122 txq = dev->data->tx_queues[i];
1124 if (txq->tx_deferred_start)
1126 diag = fm10k_dev_tx_queue_start(dev, i);
1129 for (j = 0; j < i; ++j)
1130 tx_queue_clean(dev->data->tx_queues[j]);
1131 for (j = 0; j < dev->data->nb_rx_queues; ++j)
1132 rx_queue_clean(dev->data->rx_queues[j]);
1137 /* Update default vlan when not in VMDQ mode */
1138 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1139 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1141 fm10k_link_update(dev, 0);
1147 fm10k_dev_stop(struct rte_eth_dev *dev)
1149 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1150 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1151 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
1154 PMD_INIT_FUNC_TRACE();
1156 if (dev->data->tx_queues)
1157 for (i = 0; i < dev->data->nb_tx_queues; i++)
1158 fm10k_dev_tx_queue_stop(dev, i);
1160 if (dev->data->rx_queues)
1161 for (i = 0; i < dev->data->nb_rx_queues; i++)
1162 fm10k_dev_rx_queue_stop(dev, i);
1164 /* Disable datapath event */
1165 if (rte_intr_dp_is_en(intr_handle)) {
1166 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1167 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1168 3 << FM10K_RXINT_TIMER_SHIFT);
1169 if (hw->mac.type == fm10k_mac_pf)
1170 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
1171 FM10K_ITR_MASK_SET);
1173 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
1174 FM10K_ITR_MASK_SET);
1177 /* Clean datapath event and queue/vec mapping */
1178 rte_intr_efd_disable(intr_handle);
1179 rte_free(intr_handle->intr_vec);
1180 intr_handle->intr_vec = NULL;
1184 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1188 PMD_INIT_FUNC_TRACE();
1190 if (dev->data->tx_queues) {
1191 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1192 struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1198 if (dev->data->rx_queues) {
1199 for (i = 0; i < dev->data->nb_rx_queues; i++)
1200 fm10k_rx_queue_release(dev->data->rx_queues[i]);
1205 fm10k_dev_close(struct rte_eth_dev *dev)
1207 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1209 PMD_INIT_FUNC_TRACE();
1212 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1213 MAX_LPORT_NUM, false);
1214 fm10k_mbx_unlock(hw);
1216 /* allow 10ms for device to quiesce */
1217 rte_delay_us(FM10K_SWITCH_QUIESCE_US);
1219 /* Stop mailbox service first */
1220 fm10k_close_mbx_service(hw);
1221 fm10k_dev_stop(dev);
1222 fm10k_dev_queue_release(dev);
1227 fm10k_link_update(struct rte_eth_dev *dev,
1228 __rte_unused int wait_to_complete)
1230 struct fm10k_dev_info *dev_info =
1231 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1232 PMD_INIT_FUNC_TRACE();
1234 /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
1235 * leave the speed undefined since there is no 50Gbps Ethernet.
1237 dev->data->dev_link.link_speed = 0;
1238 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1239 dev->data->dev_link.link_status =
1240 dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
1245 static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1246 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
1251 if (xstats_names != NULL) {
1252 /* Note: limit checked in rte_eth_xstats_names() */
1255 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1256 snprintf(xstats_names[count].name,
1257 sizeof(xstats_names[count].name),
1258 "%s", fm10k_hw_stats_strings[count].name);
1262 /* PF queue stats */
1263 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1264 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1265 snprintf(xstats_names[count].name,
1266 sizeof(xstats_names[count].name),
1268 fm10k_hw_stats_rx_q_strings[i].name);
1271 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1272 snprintf(xstats_names[count].name,
1273 sizeof(xstats_names[count].name),
1275 fm10k_hw_stats_tx_q_strings[i].name);
1280 return FM10K_NB_XSTATS;
1284 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1287 struct fm10k_hw_stats *hw_stats =
1288 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1289 unsigned i, q, count = 0;
1291 if (n < FM10K_NB_XSTATS)
1292 return FM10K_NB_XSTATS;
1295 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1296 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1297 fm10k_hw_stats_strings[count].offset);
1298 xstats[count].id = count;
1302 /* PF queue stats */
1303 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1304 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1305 xstats[count].value =
1306 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1307 fm10k_hw_stats_rx_q_strings[i].offset);
1308 xstats[count].id = count;
1311 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1312 xstats[count].value =
1313 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1314 fm10k_hw_stats_tx_q_strings[i].offset);
1315 xstats[count].id = count;
1320 return FM10K_NB_XSTATS;
1324 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1326 uint64_t ipackets, opackets, ibytes, obytes;
1327 struct fm10k_hw *hw =
1328 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1329 struct fm10k_hw_stats *hw_stats =
1330 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1333 PMD_INIT_FUNC_TRACE();
1335 fm10k_update_hw_stats(hw, hw_stats);
1337 ipackets = opackets = ibytes = obytes = 0;
1338 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1339 (i < hw->mac.max_queues); ++i) {
1340 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1341 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1342 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
1343 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
1344 ipackets += stats->q_ipackets[i];
1345 opackets += stats->q_opackets[i];
1346 ibytes += stats->q_ibytes[i];
1347 obytes += stats->q_obytes[i];
1349 stats->ipackets = ipackets;
1350 stats->opackets = opackets;
1351 stats->ibytes = ibytes;
1352 stats->obytes = obytes;
1357 fm10k_stats_reset(struct rte_eth_dev *dev)
1359 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1360 struct fm10k_hw_stats *hw_stats =
1361 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1363 PMD_INIT_FUNC_TRACE();
1365 memset(hw_stats, 0, sizeof(*hw_stats));
1366 fm10k_rebind_hw_stats(hw, hw_stats);
1370 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1371 struct rte_eth_dev_info *dev_info)
1373 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1374 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1376 PMD_INIT_FUNC_TRACE();
1378 dev_info->pci_dev = pdev;
1379 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
1380 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
1381 dev_info->max_rx_queues = hw->mac.max_queues;
1382 dev_info->max_tx_queues = hw->mac.max_queues;
1383 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
1384 dev_info->max_hash_mac_addrs = 0;
1385 dev_info->max_vfs = pdev->max_vfs;
1386 dev_info->vmdq_pool_base = 0;
1387 dev_info->vmdq_queue_base = 0;
1388 dev_info->max_vmdq_pools = ETH_32_POOLS;
1389 dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
1390 dev_info->rx_offload_capa =
1391 DEV_RX_OFFLOAD_VLAN_STRIP |
1392 DEV_RX_OFFLOAD_IPV4_CKSUM |
1393 DEV_RX_OFFLOAD_UDP_CKSUM |
1394 DEV_RX_OFFLOAD_TCP_CKSUM;
1395 dev_info->tx_offload_capa =
1396 DEV_TX_OFFLOAD_VLAN_INSERT |
1397 DEV_TX_OFFLOAD_IPV4_CKSUM |
1398 DEV_TX_OFFLOAD_UDP_CKSUM |
1399 DEV_TX_OFFLOAD_TCP_CKSUM |
1400 DEV_TX_OFFLOAD_TCP_TSO;
1402 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
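/* the RSS key spans FM10K_RSSRK_SIZE 32-bit registers, presumably 10,
 * matching the 40-byte default key used in fm10k_dev_rss_configure()
 */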
1403 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1405 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1407 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1408 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1409 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1411 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1415 dev_info->default_txconf = (struct rte_eth_txconf) {
1417 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1418 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1419 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1421 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1422 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1423 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1426 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1427 .nb_max = FM10K_MAX_RX_DESC,
1428 .nb_min = FM10K_MIN_RX_DESC,
1429 .nb_align = FM10K_MULT_RX_DESC,
1432 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1433 .nb_max = FM10K_MAX_TX_DESC,
1434 .nb_min = FM10K_MIN_TX_DESC,
1435 .nb_align = FM10K_MULT_TX_DESC,
1436 .nb_seg_max = FM10K_TX_MAX_SEG,
1437 .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1440 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1441 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1442 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1445 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1446 static const uint32_t *
1447 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1449 if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1450 dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1451 static uint32_t ptypes[] = {
1452 /* refers to rx_desc_to_ol_flags() */
1455 RTE_PTYPE_L3_IPV4_EXT,
1457 RTE_PTYPE_L3_IPV6_EXT,
1464 } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1465 dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1466 static uint32_t ptypes_vec[] = {
1467 /* refers to fm10k_desc_to_pktype_v() */
1469 RTE_PTYPE_L3_IPV4_EXT,
1471 RTE_PTYPE_L3_IPV6_EXT,
1474 RTE_PTYPE_TUNNEL_GENEVE,
1475 RTE_PTYPE_TUNNEL_NVGRE,
1476 RTE_PTYPE_TUNNEL_VXLAN,
1477 RTE_PTYPE_TUNNEL_GRE,
1487 static const uint32_t *
1488 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1495 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1498 uint16_t mac_num = 0;
1499 uint32_t vid_idx, vid_bit, mac_index;
1500 struct fm10k_hw *hw;
1501 struct fm10k_macvlan_filter_info *macvlan;
1502 struct rte_eth_dev_data *data = dev->data;
1504 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1505 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1507 if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1508 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1512 if (vlan_id > ETH_VLAN_ID_MAX) {
1513 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1517 vid_idx = FM10K_VFTA_IDX(vlan_id);
1518 vid_bit = FM10K_VFTA_BIT(vlan_id);
1519 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1520 if (on && (macvlan->vfta[vid_idx] & vid_bit))
1522 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1523 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
"in the VLAN filter table");
1530 result = fm10k_update_vlan(hw, vlan_id, 0, on);
1531 fm10k_mbx_unlock(hw);
1532 if (result != FM10K_SUCCESS) {
1533 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1537 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1538 (result == FM10K_SUCCESS); mac_index++) {
1539 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1541 if (mac_num > macvlan->mac_num - 1) {
1542 PMD_INIT_LOG(ERR, "MAC address number "
1547 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1548 data->mac_addrs[mac_index].addr_bytes,
1550 fm10k_mbx_unlock(hw);
1553 if (result != FM10K_SUCCESS) {
1554 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1559 macvlan->vlan_num++;
1560 macvlan->vfta[vid_idx] |= vid_bit;
1562 macvlan->vlan_num--;
1563 macvlan->vfta[vid_idx] &= ~vid_bit;
1569 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1571 if (mask & ETH_VLAN_STRIP_MASK) {
1572 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1573 PMD_INIT_LOG(ERR, "VLAN stripping is "
1574 "always on in fm10k");
1577 if (mask & ETH_VLAN_EXTEND_MASK) {
1578 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1579 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1580 "supported in fm10k");
1583 if (mask & ETH_VLAN_FILTER_MASK) {
1584 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1585 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1591 /* Add/Remove a MAC address, and update filters to main VSI */
1592 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1593 const u8 *mac, bool add, uint32_t pool)
1595 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1596 struct fm10k_macvlan_filter_info *macvlan;
1599 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1601 if (pool != MAIN_VSI_POOL_NUMBER) {
PMD_DRV_LOG(ERR, "VMDQ not enabled, cannot set "
"MAC to pool %u", pool);
1606 for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1607 if (!macvlan->vfta[j])
1609 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1610 if (!(macvlan->vfta[j] & (1 << k)))
1612 if (i + 1 > macvlan->vlan_num) {
PMD_INIT_LOG(ERR, "VLAN number does not match");
1617 fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1618 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1619 fm10k_mbx_unlock(hw);
1625 /* Add/Remove a MAC address, and update filters to VMDQ */
1626 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1627 const u8 *mac, bool add, uint32_t pool)
1629 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1630 struct fm10k_macvlan_filter_info *macvlan;
1631 struct rte_eth_vmdq_rx_conf *vmdq_conf;
1634 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1635 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1637 if (pool > macvlan->nb_queue_pools) {
1638 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1640 pool, macvlan->nb_queue_pools);
1643 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1644 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1647 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1648 vmdq_conf->pool_map[i].vlan_id, add, 0);
1649 fm10k_mbx_unlock(hw);
1653 /* Add/Remove a MAC address, and update filters */
1654 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1655 const u8 *mac, bool add, uint32_t pool)
1657 struct fm10k_macvlan_filter_info *macvlan;
1659 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1661 if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1662 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1664 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1672 /* Add a MAC address, and update filters */
1674 fm10k_macaddr_add(struct rte_eth_dev *dev,
1675 struct ether_addr *mac_addr,
1679 struct fm10k_macvlan_filter_info *macvlan;
1681 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1682 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1683 macvlan->mac_vmdq_id[index] = pool;
1687 /* Remove a MAC address, and update filters */
1689 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1691 struct rte_eth_dev_data *data = dev->data;
1692 struct fm10k_macvlan_filter_info *macvlan;
1694 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1695 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1696 FALSE, macvlan->mac_vmdq_id[index]);
1697 macvlan->mac_vmdq_id[index] = 0;
1701 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1703 if ((request < min) || (request > max) || ((request % mult) != 0))
1711 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1713 if ((request < min) || (request > max) || ((div % request) != 0))
1720 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1722 uint16_t rx_free_thresh;
1724 if (conf->rx_free_thresh == 0)
1725 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1727 rx_free_thresh = conf->rx_free_thresh;
1729 /* make sure the requested threshold satisfies the constraints */
1730 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1731 FM10K_RX_FREE_THRESH_MAX(q),
1732 FM10K_RX_FREE_THRESH_DIV(q),
1734 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1735 "less than or equal to %u, "
1736 "greater than or equal to %u, "
1737 "and a divisor of %u",
1738 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1739 FM10K_RX_FREE_THRESH_MIN(q),
1740 FM10K_RX_FREE_THRESH_DIV(q));
1744 q->alloc_thresh = rx_free_thresh;
1745 q->drop_en = conf->rx_drop_en;
1746 q->rx_deferred_start = conf->rx_deferred_start;
1752 * Hardware requires specific alignment for Rx packet buffers. At
1753 * least one of the following two conditions must be satisfied.
1754 * 1. Address is 512B aligned
1755 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1757 * As such, the driver may need to adjust the DMA address within the
1758 * buffer by up to 512B.
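*
* For example, an 8B-aligned buffer that begins just past a 512B
* boundary can lose up to 511B before the next aligned address, so
* the full 512B worst case is subtracted when validating element size.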
1760 * return 1 if the element size is valid, otherwise return 0.
1763 mempool_element_size_valid(struct rte_mempool *mp)
1767 /* elt_size includes mbuf header and headroom */
1768 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1769 RTE_PKTMBUF_HEADROOM;
1771 /* account for up to 512B of alignment */
1772 min_size -= FM10K_RX_DATABUF_ALIGN;
1774 /* sanity check for overflow */
1775 if (min_size > mp->elt_size)
1783 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1784 uint16_t nb_desc, unsigned int socket_id,
1785 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1787 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1788 struct fm10k_dev_info *dev_info =
1789 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1790 struct fm10k_rx_queue *q;
1791 const struct rte_memzone *mz;
1793 PMD_INIT_FUNC_TRACE();
1795 /* make sure the mempool element size can account for alignment. */
1796 if (!mempool_element_size_valid(mp)) {
1797 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1801 /* make sure a valid number of descriptors have been requested */
1802 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1803 FM10K_MULT_RX_DESC, nb_desc)) {
1804 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1805 "less than or equal to %"PRIu32", "
1806 "greater than or equal to %u, "
1807 "and a multiple of %u",
1808 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1809 FM10K_MULT_RX_DESC);
1814 * if this queue existed already, free the associated memory. The
1815 * queue cannot be reused in case we need to allocate memory on
* a different socket than was previously used.
1818 if (dev->data->rx_queues[queue_id] != NULL) {
1819 rx_queue_free(dev->data->rx_queues[queue_id]);
1820 dev->data->rx_queues[queue_id] = NULL;
1823 /* allocate memory for the queue structure */
1824 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1827 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1833 q->nb_desc = nb_desc;
1834 q->nb_fake_desc = FM10K_MULT_RX_DESC;
1835 q->port_id = dev->data->port_id;
1836 q->queue_id = queue_id;
1837 q->tail_ptr = (volatile uint32_t *)
1838 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1839 if (handle_rxconf(q, conf))
1842 /* allocate memory for the software ring */
1843 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1844 (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1845 RTE_CACHE_LINE_SIZE, socket_id);
1846 if (q->sw_ring == NULL) {
1847 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1853 * allocate memory for the hardware descriptor ring. A memzone large
1854 * enough to hold the maximum ring size is requested to allow for
1855 * resizing in later calls to the queue setup function.
1857 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1858 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1861 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1862 rte_free(q->sw_ring);
1866 q->hw_ring = mz->addr;
1867 q->hw_ring_phys_addr = mz->iova;
/* Check if the number of descriptors satisfies the vector requirement */
1870 if (!rte_is_power_of_2(nb_desc)) {
1871 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1872 "preconditions - canceling the feature for "
1873 "the whole port[%d]",
1874 q->queue_id, q->port_id);
1875 dev_info->rx_vec_allowed = false;
1877 fm10k_rxq_vec_setup(q);
1879 dev->data->rx_queues[queue_id] = q;
1884 fm10k_rx_queue_release(void *queue)
1886 PMD_INIT_FUNC_TRACE();
1888 rx_queue_free(queue);
1892 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1894 uint16_t tx_free_thresh;
1895 uint16_t tx_rs_thresh;
/* the constraint macros require that tx_free_thresh is configured
* before tx_rs_thresh */
1899 if (conf->tx_free_thresh == 0)
1900 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1902 tx_free_thresh = conf->tx_free_thresh;
1904 /* make sure the requested threshold satisfies the constraints */
1905 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1906 FM10K_TX_FREE_THRESH_MAX(q),
1907 FM10K_TX_FREE_THRESH_DIV(q),
1909 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1910 "less than or equal to %u, "
1911 "greater than or equal to %u, "
1912 "and a divisor of %u",
1913 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1914 FM10K_TX_FREE_THRESH_MIN(q),
1915 FM10K_TX_FREE_THRESH_DIV(q));
1919 q->free_thresh = tx_free_thresh;
1921 if (conf->tx_rs_thresh == 0)
1922 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1924 tx_rs_thresh = conf->tx_rs_thresh;
1926 q->tx_deferred_start = conf->tx_deferred_start;
1928 /* make sure the requested threshold satisfies the constraints */
1929 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1930 FM10K_TX_RS_THRESH_MAX(q),
1931 FM10K_TX_RS_THRESH_DIV(q),
1933 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1934 "less than or equal to %u, "
1935 "greater than or equal to %u, "
1936 "and a divisor of %u",
1937 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1938 FM10K_TX_RS_THRESH_MIN(q),
1939 FM10K_TX_RS_THRESH_DIV(q));
1943 q->rs_thresh = tx_rs_thresh;
1949 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1950 uint16_t nb_desc, unsigned int socket_id,
1951 const struct rte_eth_txconf *conf)
1953 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1954 struct fm10k_tx_queue *q;
1955 const struct rte_memzone *mz;
1957 PMD_INIT_FUNC_TRACE();
1959 /* make sure a valid number of descriptors have been requested */
1960 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1961 FM10K_MULT_TX_DESC, nb_desc)) {
1962 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1963 "less than or equal to %"PRIu32", "
1964 "greater than or equal to %u, "
1965 "and a multiple of %u",
1966 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1967 FM10K_MULT_TX_DESC);
1972 * if this queue existed already, free the associated memory. The
1973 * queue cannot be reused in case we need to allocate memory on
* a different socket than was previously used.
1976 if (dev->data->tx_queues[queue_id] != NULL) {
1977 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1980 dev->data->tx_queues[queue_id] = NULL;
1983 /* allocate memory for the queue structure */
1984 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1987 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1992 q->nb_desc = nb_desc;
1993 q->port_id = dev->data->port_id;
1994 q->queue_id = queue_id;
1995 q->txq_flags = conf->txq_flags;
1996 q->ops = &def_txq_ops;
1997 q->tail_ptr = (volatile uint32_t *)
1998 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1999 if (handle_txconf(q, conf))
2002 /* allocate memory for the software ring */
2003 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2004 nb_desc * sizeof(struct rte_mbuf *),
2005 RTE_CACHE_LINE_SIZE, socket_id);
2006 if (q->sw_ring == NULL) {
2007 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2013 * allocate memory for the hardware descriptor ring. A memzone large
2014 * enough to hold the maximum ring size is requested to allow for
2015 * resizing in later calls to the queue setup function.
2017 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2018 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2021 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2022 rte_free(q->sw_ring);
2026 q->hw_ring = mz->addr;
2027 q->hw_ring_phys_addr = mz->iova;
2030 * allocate memory for the RS bit tracker. Enough slots to hold the
2031 * descriptor index for each RS bit needing to be set are required.
2033 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2034 ((nb_desc + 1) / q->rs_thresh) *
2036 RTE_CACHE_LINE_SIZE, socket_id);
2037 if (q->rs_tracker.list == NULL) {
2038 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2039 rte_free(q->sw_ring);
2044 dev->data->tx_queues[queue_id] = q;
2049 fm10k_tx_queue_release(void *queue)
2051 struct fm10k_tx_queue *q = queue;
2052 PMD_INIT_FUNC_TRACE();
2058 fm10k_reta_update(struct rte_eth_dev *dev,
2059 struct rte_eth_rss_reta_entry64 *reta_conf,
2062 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2063 uint16_t i, j, idx, shift;
2067 PMD_INIT_FUNC_TRACE();
2069 if (reta_size > FM10K_MAX_RSS_INDICES) {
PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
"(%d) exceeds the maximum supported by hardware "
"(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2077 * Update Redirection Table RETA[n], n=0..31. The redirection table has
* 128 entries in 32 registers
2080 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2081 idx = i / RTE_RETA_GROUP_SIZE;
2082 shift = i % RTE_RETA_GROUP_SIZE;
2083 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2084 BIT_MASK_PER_UINT32);
2089 if (mask != BIT_MASK_PER_UINT32)
2090 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2092 for (j = 0; j < CHARS_PER_UINT32; j++) {
2093 if (mask & (0x1 << j)) {
2095 reta &= ~(UINT8_MAX << CHAR_BIT * j);
2096 reta |= reta_conf[idx].reta[shift + j] <<
2100 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2107 fm10k_reta_query(struct rte_eth_dev *dev,
2108 struct rte_eth_rss_reta_entry64 *reta_conf,
2111 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2112 uint16_t i, j, idx, shift;
2116 PMD_INIT_FUNC_TRACE();
2118 if (reta_size < FM10K_MAX_RSS_INDICES) {
PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
"(%d) is smaller than the number of entries supported by "
"hardware (%d)", reta_size, FM10K_MAX_RSS_INDICES);
2126 * Read Redirection Table RETA[n], n=0..31. The redirection table has
* 128 entries in 32 registers
2129 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2130 idx = i / RTE_RETA_GROUP_SIZE;
2131 shift = i % RTE_RETA_GROUP_SIZE;
2132 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2133 BIT_MASK_PER_UINT32);
2137 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2138 for (j = 0; j < CHARS_PER_UINT32; j++) {
2139 if (mask & (0x1 << j))
2140 reta_conf[idx].reta[shift + j] = ((reta >>
2141 CHAR_BIT * j) & UINT8_MAX);
2149 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2150 struct rte_eth_rss_conf *rss_conf)
2152 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2153 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2155 uint64_t hf = rss_conf->rss_hf;
2158 PMD_INIT_FUNC_TRACE();
2160 if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2161 FM10K_RSSRK_ENTRIES_PER_REG))
2168 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
2169 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
2170 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
2171 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
2172 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
2173 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
2174 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
2175 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
2176 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
/* If the mapping doesn't match any supported hash type, return */
2183 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2184 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2186 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
static int
fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t hf;
	int i;

	PMD_INIT_FUNC_TRACE();

	if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
				FM10K_RSSRK_ENTRIES_PER_REG))
		return -EINVAL;

	if (key != NULL)
		for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
			key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));

	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
	hf = 0;
	hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4             : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6             : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX          : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX      : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX      : 0;

	rss_conf->rss_hf = hf;

	return 0;
}
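/*
 * Usage sketch (application code): read back the active configuration; pass
 * rss_key = NULL when only the enabled hash types are of interest.
 *
 *	struct rte_eth_rss_conf conf = { .rss_key = NULL };
 *
 *	rte_eth_dev_rss_hash_conf_get(0, &conf);
 *	(conf.rss_hf now holds the ETH_RSS_* flags programmed above)
 */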
static void
fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

	/* Bind all local non-queue interrupts to vector 0 */
	int_map |= FM10K_MISC_VEC_ID;

	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);

	/* Enable misc causes */
	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
				FM10K_EIMR_ENABLE(THI_FAULT) |
				FM10K_EIMR_ENABLE(FUM_FAULT) |
				FM10K_EIMR_ENABLE(MAILBOX) |
				FM10K_EIMR_ENABLE(SWITCHREADY) |
				FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
				FM10K_EIMR_ENABLE(SRAMERROR) |
				FM10K_EIMR_ENABLE(VFLR));

	/* Enable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	FM10K_WRITE_FLUSH(hw);
}
static void
fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_DISABLE;

	int_map |= FM10K_MISC_VEC_ID;

	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);

	/* Disable misc causes */
	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(THI_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR));

	/* Disable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
	FM10K_WRITE_FLUSH(hw);
}
static void
fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

	/* Bind all local non-queue interrupts to vector 0 */
	int_map |= FM10K_MISC_VEC_ID;

	/* Only INT 0 is available; the other 15 are reserved. */
	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);

	/* Enable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	FM10K_WRITE_FLUSH(hw);
}
static void
fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_DISABLE;

	int_map |= FM10K_MISC_VEC_ID;

	/* Only INT 0 is available; the other 15 are reserved. */
	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);

	/* Disable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
	FM10K_WRITE_FLUSH(hw);
}
static int
fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);

	/* Enable ITR */
	if (hw->mac.type == fm10k_mac_pf)
		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
	else
		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
	rte_intr_enable(&pdev->intr_handle);
	return 0;
}
static int
fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);

	/* Disable ITR */
	if (hw->mac.type == fm10k_mac_pf)
		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
			FM10K_ITR_MASK_SET);
	else
		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
			FM10K_ITR_MASK_SET);
	return 0;
}
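/*
 * Usage sketch (application code): with intr_conf.rxq set at configure time,
 * an lcore arms the queue interrupt, blocks on the event fd, then returns to
 * polling; the epoll plumbing via rte_eth_dev_rx_intr_ctl_q() is assumed to
 * have been done once at startup.
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, 1, timeout_ms);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	(drain the queue with rte_eth_rx_burst() before re-arming)
 */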
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
	uint32_t intr_vector, vec;
	uint16_t queue_id;
	int result = 0;

	/* fm10k needs one separate interrupt for the mailbox, so only
	 * drivers which support multiple interrupt vectors, e.g. vfio-pci,
	 * can work in fm10k interrupt mode.
	 */
	if (!rte_intr_cap_multiple(intr_handle) ||
			dev->data->dev_conf.intr_conf.rxq == 0)
		return result;

	intr_vector = dev->data->nb_rx_queues;

	/* disable interrupt first */
	rte_intr_disable(intr_handle);
	if (hw->mac.type == fm10k_mac_pf)
		fm10k_dev_disable_intr_pf(dev);
	else
		fm10k_dev_disable_intr_vf(dev);

	if (rte_intr_efd_enable(intr_handle, intr_vector)) {
		PMD_INIT_LOG(ERR, "Failed to init event fd");
		result = -EIO;
	}

	if (rte_intr_dp_is_en(intr_handle) && !result) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec) {
			for (queue_id = 0, vec = FM10K_RX_VEC_START;
					queue_id < dev->data->nb_rx_queues;
					queue_id++) {
				intr_handle->intr_vec[queue_id] = vec;
				if (vec < intr_handle->nb_efd - 1
						+ FM10K_RX_VEC_START)
					vec++;
			}
		} else {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec", dev->data->nb_rx_queues);
			rte_intr_efd_disable(intr_handle);
			result = -ENOMEM;
		}
	}

	if (hw->mac.type == fm10k_mac_pf)
		fm10k_dev_enable_intr_pf(dev);
	else
		fm10k_dev_enable_intr_vf(dev);
	rte_intr_enable(intr_handle);
	hw->mac.ops.update_int_moderator(hw);
	return result;
}
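/*
 * Configuration sketch (application code): the setup above only takes effect
 * when Rx interrupts were requested before rte_eth_dev_configure(), e.g.:
 *
 *	struct rte_eth_conf port_conf = {
 *		.intr_conf = { .rxq = 1 },
 *	};
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */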
static inline int
fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
{
	struct fm10k_fault fault;
	int err;
	const char *estr = "Unknown error";

	/* Process PCA fault */
	if (eicr & FM10K_EICR_PCA_FAULT) {
		err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case PCA_NO_FAULT:
			estr = "PCA_NO_FAULT"; break;
		case PCA_UNMAPPED_ADDR:
			estr = "PCA_UNMAPPED_ADDR"; break;
		case PCA_BAD_QACCESS_PF:
			estr = "PCA_BAD_QACCESS_PF"; break;
		case PCA_BAD_QACCESS_VF:
			estr = "PCA_BAD_QACCESS_VF"; break;
		case PCA_MALICIOUS_REQ:
			estr = "PCA_MALICIOUS_REQ"; break;
		case PCA_POISONED_TLP:
			estr = "PCA_POISONED_TLP"; break;
		case PCA_TLP_ABORT:
			estr = "PCA_TLP_ABORT"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	/* Process THI fault */
	if (eicr & FM10K_EICR_THI_FAULT) {
		err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case THI_NO_FAULT:
			estr = "THI_NO_FAULT"; break;
		case THI_MAL_DIS_Q_FAULT:
			estr = "THI_MAL_DIS_Q_FAULT"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	/* Process FUM fault */
	if (eicr & FM10K_EICR_FUM_FAULT) {
		err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case FUM_NO_FAULT:
			estr = "FUM_NO_FAULT"; break;
		case FUM_UNMAPPED_ADDR:
			estr = "FUM_UNMAPPED_ADDR"; break;
		case FUM_POISONED_TLP:
			estr = "FUM_POISONED_TLP"; break;
		case FUM_BAD_VF_QACCESS:
			estr = "FUM_BAD_VF_QACCESS"; break;
		case FUM_ADD_DECODE_ERR:
			estr = "FUM_ADD_DECODE_ERR"; break;
		case FUM_RO_ERROR:
			estr = "FUM_RO_ERROR"; break;
		case FUM_QPRC_CRC_ERROR:
			estr = "FUM_QPRC_CRC_ERROR"; break;
		case FUM_CSR_TIMEOUT:
			estr = "FUM_CSR_TIMEOUT"; break;
		case FUM_INVALID_TYPE:
			estr = "FUM_INVALID_TYPE"; break;
		case FUM_INVALID_LENGTH:
			estr = "FUM_INVALID_LENGTH"; break;
		case FUM_INVALID_BE:
			estr = "FUM_INVALID_BE"; break;
		case FUM_INVALID_ALIGN:
			estr = "FUM_INVALID_ALIGN"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	return 0;
error:
	PMD_INIT_LOG(ERR, "Failed to handle fault event.");
	return err;
}
/**
 * PF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_pf(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t cause, status;
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	int status_mbx;
	s32 err;

	if (hw->mac.type != fm10k_mac_pf)
		return;

	cause = FM10K_READ_REG(hw, FM10K_EICR);

	/* Handle PCI fault cases */
	if (cause & FM10K_EICR_FAULT_MASK) {
		PMD_INIT_LOG(ERR, "INT: fault detected!");
		fm10k_dev_handle_fault(hw, cause);
	}

	/* Handle switch up/down */
	if (cause & FM10K_EICR_SWITCHNOTREADY)
		PMD_INIT_LOG(ERR, "INT: Switch is not ready");

	if (cause & FM10K_EICR_SWITCHREADY) {
		PMD_INIT_LOG(INFO, "INT: Switch is ready");
		if (dev_info->sm_down == 1) {
			fm10k_mbx_lock(hw);

			/* For recreating logical ports */
			status_mbx = hw->mac.ops.update_lport_state(hw,
					hw->mac.dglort_map, MAX_LPORT_NUM, 1);
			if (status_mbx == FM10K_SUCCESS)
				PMD_INIT_LOG(INFO,
					"INT: Recreated Logical port");
			else
				PMD_INIT_LOG(INFO,
					"INT: Logical ports weren't recreated");

			status_mbx = hw->mac.ops.update_xcast_mode(hw,
				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
			if (status_mbx != FM10K_SUCCESS)
				PMD_INIT_LOG(ERR, "Failed to set XCAST mode");

			fm10k_mbx_unlock(hw);

			/* first clear the internal SW recording structure */
			if (!(dev->data->dev_conf.rxmode.mq_mode &
						ETH_MQ_RX_VMDQ_FLAG))
				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
					false);

			fm10k_MAC_filter_set(dev, hw->mac.addr, false,
					MAIN_VSI_POOL_NUMBER);

			/*
			 * Add default mac address and vlan for the logical
			 * ports that have been created, leave to the
			 * application to fully recover Rx filtering.
			 */
			fm10k_MAC_filter_set(dev, hw->mac.addr, true,
					MAIN_VSI_POOL_NUMBER);

			if (!(dev->data->dev_conf.rxmode.mq_mode &
						ETH_MQ_RX_VMDQ_FLAG))
				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
					true);

			dev_info->sm_down = 0;
			_rte_eth_dev_callback_process(dev,
					RTE_ETH_EVENT_INTR_LSC,
					NULL);
		}
	}

	/* Handle mailbox message */
	fm10k_mbx_lock(hw);
	err = hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	if (err == FM10K_ERR_RESET_REQUESTED) {
		PMD_INIT_LOG(INFO, "INT: Switch is down");
		dev_info->sm_down = 1;
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
				NULL);
	}

	/* Handle SRAM error */
	if (cause & FM10K_EICR_SRAMERROR) {
		PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");

		status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
		/* Write to clear pending bits */
		FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);

		/* Todo: print out error message after shared code updates */
	}

	/* Clear these 3 events, if any are set */
	cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
		FM10K_EICR_SWITCHREADY;
	if (cause)
		FM10K_WRITE_REG(hw, FM10K_EICR, cause);

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(dev->intr_handle);
}
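/*
 * Usage sketch (application code): the RTE_ETH_EVENT_INTR_LSC notifications
 * raised by the handlers here reach the application through a callback
 * registered once per port; the callback name is an example.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type type,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link state change event %d\n", port_id,
 *				type);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *			lsc_event_cb, NULL);
 */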
/**
 * VF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_vf(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_mbx_info *mbx = &hw->mbx;
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	const enum fm10k_mbx_state state = mbx->state;
	int status_mbx;

	if (hw->mac.type != fm10k_mac_vf)
		return;

	/* Handle mailbox message if lock is acquired */
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
		PMD_INIT_LOG(INFO, "INT: Switch has gone down");

		fm10k_mbx_lock(hw);
		hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
				MAX_LPORT_NUM, 1);
		fm10k_mbx_unlock(hw);

		/* Setting reset flag */
		dev_info->sm_down = 1;
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
				NULL);
	}

	if (dev_info->sm_down == 1 &&
			hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
		PMD_INIT_LOG(INFO, "INT: Switch has gone up");
		fm10k_mbx_lock(hw);
		status_mbx = hw->mac.ops.update_xcast_mode(hw,
				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
		if (status_mbx != FM10K_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
		fm10k_mbx_unlock(hw);

		/* first clear the internal SW recording structure */
		fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
		fm10k_MAC_filter_set(dev, hw->mac.addr, false,
				MAIN_VSI_POOL_NUMBER);

		/*
		 * Add default mac address and vlan for the logical ports that
		 * have been created, leave to the application to fully
		 * recover Rx filtering.
		 */
		fm10k_MAC_filter_set(dev, hw->mac.addr, true,
				MAIN_VSI_POOL_NUMBER);
		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

		dev_info->sm_down = 0;
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
				NULL);
	}

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(dev->intr_handle);
}
/* Mailbox message handler in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err = 0;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace default message handler with new ones */
	if (hw->mac.type == fm10k_mac_vf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
				err);
		return err;
	}
	/* Connect to SM for PF device or PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}
static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
	/* Disconnect from SM for PF device or PF for VF device */
	hw->mbx.ops.disconnect(hw, &hw->mbx);
}
static const struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure		= fm10k_dev_configure,
	.dev_start		= fm10k_dev_start,
	.dev_stop		= fm10k_dev_stop,
	.dev_close		= fm10k_dev_close,
	.promiscuous_enable	= fm10k_dev_promiscuous_enable,
	.promiscuous_disable	= fm10k_dev_promiscuous_disable,
	.allmulticast_enable	= fm10k_dev_allmulticast_enable,
	.allmulticast_disable	= fm10k_dev_allmulticast_disable,
	.stats_get		= fm10k_stats_get,
	.xstats_get		= fm10k_xstats_get,
	.xstats_get_names	= fm10k_xstats_get_names,
	.stats_reset		= fm10k_stats_reset,
	.xstats_reset		= fm10k_stats_reset,
	.link_update		= fm10k_link_update,
	.dev_infos_get		= fm10k_dev_infos_get,
	.dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
	.vlan_filter_set	= fm10k_vlan_filter_set,
	.vlan_offload_set	= fm10k_vlan_offload_set,
	.mac_addr_add		= fm10k_macaddr_add,
	.mac_addr_remove	= fm10k_macaddr_remove,
	.rx_queue_start		= fm10k_dev_rx_queue_start,
	.rx_queue_stop		= fm10k_dev_rx_queue_stop,
	.tx_queue_start		= fm10k_dev_tx_queue_start,
	.tx_queue_stop		= fm10k_dev_tx_queue_stop,
	.rx_queue_setup		= fm10k_rx_queue_setup,
	.rx_queue_release	= fm10k_rx_queue_release,
	.tx_queue_setup		= fm10k_tx_queue_setup,
	.tx_queue_release	= fm10k_tx_queue_release,
	.rx_descriptor_done	= fm10k_dev_rx_descriptor_done,
	.rx_queue_intr_enable	= fm10k_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable	= fm10k_dev_rx_queue_intr_disable,
	.reta_update		= fm10k_reta_update,
	.reta_query		= fm10k_reta_query,
	.rss_hash_update	= fm10k_rss_hash_update,
	.rss_hash_conf_get	= fm10k_rss_hash_conf_get,
};
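/*
 * Note: these callbacks are only reached through the generic ethdev layer,
 * e.g. rte_eth_promiscuous_enable(port_id) dispatches to
 * fm10k_dev_promiscuous_enable() above; applications never call fm10k
 * symbols directly.
 */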
static int ftag_check_handler(__rte_unused const char *key,
		const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
fm10k_check_ftag(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *ftag_key = "enable_ftag";

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, ftag_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* FTAG is enabled when there's key-value pair: enable_ftag=1 */
	if (rte_kvargs_process(kvlist, ftag_key,
				ftag_check_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
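/*
 * Usage sketch (command line, not driver code): FTAG support is requested
 * per device via EAL devargs; the PCI address below is an example
 * placeholder.
 *
 *	testpmd -w 0000:84:00.0,enable_ftag=1 -- -i
 */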
static uint16_t
fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
		ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
						 num);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_tx;
}

static void __attribute__((cold))
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
	struct fm10k_tx_queue *txq;
	int i;
	int use_sse = 1;
	uint16_t tx_ftag_en = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* primary process has set the ftag flag and txq_flags */
		txq = dev->data->tx_queues[0];
		if (fm10k_tx_vec_condition_check(txq)) {
			dev->tx_pkt_burst = fm10k_xmit_pkts;
			dev->tx_pkt_prepare = fm10k_prep_pkts;
			PMD_INIT_LOG(DEBUG, "Use regular Tx func");
		} else {
			PMD_INIT_LOG(DEBUG, "Use vector Tx func");
			dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
			dev->tx_pkt_prepare = NULL;
		}
		return;
	}

	if (fm10k_check_ftag(dev->device->devargs))
		tx_ftag_en = 1;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->tx_ftag_en = tx_ftag_en;
		/* Check if Vector Tx is satisfied */
		if (fm10k_tx_vec_condition_check(txq))
			use_sse = 0;
	}

	if (use_sse) {
		PMD_INIT_LOG(DEBUG, "Use vector Tx func");
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			txq = dev->data->tx_queues[i];
			fm10k_txq_vec_setup(txq);
		}
		dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
		dev->tx_pkt_prepare = NULL;
	} else {
		dev->tx_pkt_burst = fm10k_xmit_pkts;
		dev->tx_pkt_prepare = fm10k_prep_pkts;
		PMD_INIT_LOG(DEBUG, "Use regular Tx func");
	}
}
static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	uint16_t i, rx_using_sse;
	uint16_t rx_ftag_en = 0;

	if (fm10k_check_ftag(dev->device->devargs))
		rx_ftag_en = 1;

	/* In order to allow Vector Rx there are a few configuration
	 * conditions to be met.
	 */
	if (!fm10k_rx_vec_condition_check(dev) &&
			dev_info->rx_vec_allowed && !rx_ftag_en) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
		else
			dev->rx_pkt_burst = fm10k_recv_pkts_vec;
	} else if (dev->data->scattered_rx)
		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = fm10k_recv_pkts;

	rx_using_sse =
		(dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
		dev->rx_pkt_burst == fm10k_recv_pkts_vec);

	if (rx_using_sse)
		PMD_INIT_LOG(DEBUG, "Use vector Rx func");
	else
		PMD_INIT_LOG(DEBUG, "Use regular Rx func");

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];

		rxq->rx_using_sse = rx_using_sse;
		rxq->rx_ftag_en = rx_ftag_en;
	}
}
static void
fm10k_params_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_dev_info *info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);

	/* Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4. Until
	 * this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

	info->rx_vec_allowed = true;
}
static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
	int diag, i;
	struct fm10k_macvlan_filter_info *macvlan;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;
	dev->tx_pkt_prepare = &fm10k_prep_pkts;

	/*
	 * The primary process does the whole initialization; secondary
	 * processes just select the same Rx and Tx functions as the primary.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fm10k_set_rx_function(dev);
		fm10k_set_tx_function(dev);
		return 0;
	}

	rte_eth_copy_pci_info(dev, pdev);

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	memset(macvlan, 0, sizeof(*macvlan));
	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = pdev->id.device_id;
	hw->vendor_id = pdev->id.vendor_id;
	hw->subsystem_device_id = pdev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)pdev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* Initialize parameters */
	fm10k_params_init(dev);

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k",
			ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	if (diag != FM10K_SUCCESS ||
		!is_valid_assigned_ether_addr(dev->data->mac_addrs)) {

		/* Generate a random addr */
		eth_random_addr(hw->mac.addr);
		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		ether_addr_copy((const struct ether_addr *)hw->mac.addr,
				&dev->data->mac_addrs[0]);
	}

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return -EIO;
	}

	/* PF and VF have different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* register callback func to eal lib */
		rte_intr_callback_register(intr_handle,
			fm10k_dev_interrupt_handler_pf, (void *)dev);

		/* enable MISC interrupt */
		fm10k_dev_enable_intr_pf(dev);
	} else { /* VF */
		rte_intr_callback_register(intr_handle,
			fm10k_dev_interrupt_handler_vf, (void *)dev);

		fm10k_dev_enable_intr_vf(dev);
	}

	/* Enable intr after callback registered */
	rte_intr_enable(intr_handle);

	hw->mac.ops.update_int_moderator(hw);

	/* Make sure Switch Manager is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		int switch_ready = 0;

		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			fm10k_mbx_lock(hw);
			hw->mac.ops.get_host_state(hw, &switch_ready);
			fm10k_mbx_unlock(hw);
			if (switch_ready)
				break;
			/* Delay some time to acquire async LPORT_MAP info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (switch_ready == 0) {
			PMD_INIT_LOG(ERR, "switch is not ready");
			return -1;
		}
	}

	/*
	 * The calls below will trigger operations on the mailbox, so acquire
	 * the lock to avoid a race condition with the interrupt handler.
	 * Operations on the mailbox FIFO will trigger an interrupt to the
	 * PF/SM, whose handler will process them and raise an interrupt back
	 * to our side; then the FIFO in the mailbox will be touched.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
					MAX_LPORT_NUM, 1);

	/* Set unicast mode by default. App can change to other mode in other
	 * API func.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
					FM10K_XCAST_MODE_NONE);

	fm10k_mbx_unlock(hw);

	/* Make sure default VID is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			if (hw->mac.default_vid)
				break;
			/* Delay some time to acquire async port VLAN info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (!hw->mac.default_vid) {
			PMD_INIT_LOG(ERR, "default VID is not ready");
			return -1;
		}
	}

	/* Add default mac address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);

	return 0;
}
static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	fm10k_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* disable uio/vfio intr */
	rte_intr_disable(intr_handle);

	/* PF and VF have different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* disable interrupt */
		fm10k_dev_disable_intr_pf(dev);

		/* unregister callback func to eal lib */
		rte_intr_callback_unregister(intr_handle,
			fm10k_dev_interrupt_handler_pf, (void *)dev);
	} else {
		/* disable interrupt */
		fm10k_dev_disable_intr_vf(dev);

		rte_intr_callback_unregister(intr_handle,
			fm10k_dev_interrupt_handler_vf, (void *)dev);
	}

	/* free mac memory */
	if (dev->data->mac_addrs) {
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
	}

	memset(hw, 0, sizeof(*hw));

	return 0;
}
static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
}

static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
}

/*
 * The set of PCI devices this driver supports. This driver enables both PF
 * and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_pmd_fm10k = {
	.id_table = pci_id_fm10k_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_fm10k_pci_probe,
	.remove = eth_fm10k_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
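/*
 * Deployment sketch (shell, not driver code): before EAL init the device
 * must be bound to one of the kernel modules listed above, e.g. using the
 * devbind script shipped with DPDK (the path and PCI address are examples):
 *
 *	usertools/dpdk-devbind.py --bind=vfio-pci 0000:84:00.0
 */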