/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013-2016 Intel Corporation
 */

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>

#include "base/fm10k_api.h"
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Max number of retries to acquire switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US 100000
/* A period of quiescence for switch */
#define FM10K_SWITCH_QUIESCE_US 100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
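/* CHARS_PER_UINT32 is 4 (bytes in a 32-bit word), so BIT_MASK_PER_UINT32
 * evaluates to 0xF: one mask bit per byte of a 32-bit register.
 */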

/* default 1:1 map from queue ID to interrupt vector ID */
#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])

/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM 128
#define GLORT_FD_Q_BASE 0x40
#define GLORT_PF_MASK 0xFFC0
#define GLORT_FD_MASK GLORT_PF_MASK
#define GLORT_FD_INDEX GLORT_FD_Q_BASE

int fm10k_logtype_init;
int fm10k_logtype_driver;

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
	const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);

struct fm10k_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];

struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
	{"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
	{"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
	{"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
	{"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
	{"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
	{"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
	{"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
	{"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
		sizeof(fm10k_hw_stats_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
	{"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
	{"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
	{"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
		sizeof(fm10k_hw_stats_rx_q_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
	{"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
	{"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
		sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
		(FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))

fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);

fm10k_mbx_initlock(struct fm10k_hw *hw)
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));

fm10k_mbx_lock(struct fm10k_hw *hw)
	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
		rte_delay_us(FM10K_MBXLOCK_DELAY_US);

fm10k_mbx_unlock(struct fm10k_hw *hw)
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));

/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)

uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
	__rte_unused void *rx_queue,
	__rte_unused struct rte_mbuf **rx_pkts,
	__rte_unused uint16_t nb_pkts)

uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
	__rte_unused void *rx_queue,
	__rte_unused struct rte_mbuf **rx_pkts,
	__rte_unused uint16_t nb_pkts)

int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)

void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
	__rte_unused struct fm10k_rx_queue *rxq)

void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)

int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)

uint16_t __attribute__((weak))
fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
	__rte_unused struct rte_mbuf **tx_pkts,
	__rte_unused uint16_t nb_pkts)

/*
 * reset queue to initial state, allocate software buffers used when starting
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
rx_queue_reset(struct fm10k_rx_queue *q)
	static const union fm10k_rx_desc zero = {{0} };

	PMD_INIT_FUNC_TRACE();

	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);

	for (i = 0; i < q->nb_desc; ++i) {
		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
		q->hw_ring[i].q.pkt_addr = dma_addr;
		q->hw_ring[i].q.hdr_addr = dma_addr;

	/* initialize extra software ring entries. Space for these extra
	 * entries is always allocated.
	 */
	memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
	for (i = 0; i < q->nb_fake_desc; ++i) {
		q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
		q->hw_ring[q->nb_desc + i] = zero;
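
	/* software bookkeeping: next_trigger is the ring index at which the
	 * RX path next refills mbufs (every alloc_thresh descriptors) and
	 * rxrearm_start is the vector RX path's rearm cursor
	 */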
	q->next_trigger = q->alloc_thresh - 1;
	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
	q->rxrearm_start = 0;

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 */
rx_queue_clean(struct fm10k_rx_queue *q)
	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };

	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* zero faked descriptors */
	for (i = 0; i < q->nb_fake_desc; ++i)
		q->hw_ring[q->nb_desc + i] = zero;

	/* vPMD driver has a different way of releasing mbufs. */
	if (q->rx_using_sse) {
		fm10k_rx_queue_release_mbufs_vec(q);

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		rte_pktmbuf_free_seg(q->sw_ring[i]);
		q->sw_ring[i] = NULL;

/*
 * free all queue memory used when releasing the queue (i.e. on queue
 * re-configure)
 */
rx_queue_free(struct fm10k_rx_queue *q)
	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);

	rte_free(q->sw_ring);

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
			reg & ~FM10K_RXQCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
		if (!(reg & FM10K_RXQCTL_ENABLE))

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)

/*
 * reset queue to initial state, allocate software buffers used when starting
 */
tx_queue_reset(struct fm10k_tx_queue *q)
	PMD_INIT_FUNC_TRACE();

	q->nb_free = q->nb_desc - 1;
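	/* at most (nb_desc + 1) / rs_thresh descriptors can carry the RS
	 * flag at once, so size the tracker FIFO with one slot for each
	 */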
	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 */
tx_queue_clean(struct fm10k_tx_queue *q)
	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};

	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		rte_pktmbuf_free_seg(q->sw_ring[i]);
		q->sw_ring[i] = NULL;

/*
 * free all queue memory used when releasing the queue (i.e. on queue
 * re-configure)
 */
tx_queue_free(struct fm10k_tx_queue *q)
	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);

	if (q->rs_tracker.list) {
		rte_free(q->rs_tracker.list);
		q->rs_tracker.list = NULL;

	rte_free(q->sw_ring);

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
	reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
			reg & ~FM10K_TXDCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
		if (!(reg & FM10K_TXDCTL_ENABLE))

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)

fm10k_check_mq_mode(struct rte_eth_dev *dev)
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_vmdq_rx_conf *vmdq_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;

	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");

	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))

	if (hw->mac.type == fm10k_mac_vf) {
		PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");

	/* Check VMDQ queue pool number */
	if (vmdq_conf->nb_queue_pools >
			sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
			vmdq_conf->nb_queue_pools > nb_rx_q) {
		PMD_INIT_LOG(ERR, "Too many queue pools: %d",
			vmdq_conf->nb_queue_pools);

static const struct fm10k_txq_ops def_txq_ops = {
	.reset = tx_queue_reset,

fm10k_dev_configure(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
		PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
	/* multiple queue mode checking */
	ret = fm10k_check_mq_mode(dev);
		PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",

/* fls = find last set bit = 32 minus the number of leading zeros */
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
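/* e.g. fls(0) == 0, fls(1) == 1, fls(8) == 4, fls(255) == 8 */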

fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_vmdq_rx_conf *vmdq_conf;

	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
		if (!vmdq_conf->pool_map[i].pools)
		fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
		fm10k_mbx_unlock(hw);

fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Add default mac address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);

fm10k_dev_rss_configure(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint32_t mrqc, *key, i, reta, j;

#define RSS_KEY_SIZE 40
	static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
		0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
		0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
	};

	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
		dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);

	/* random key is rss_intel_key (default) or user provided (rss_key) */
	if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
		key = (uint32_t *)rss_intel_key;
	else
		key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

	/* Now fill our hash function seeds, 4 bytes at a time */
	for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
		FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

	/*
	 * Fill in redirection table
	 * The byte-swap is needed because NIC registers are in
	 * little-endian order.
	 */
	for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
		if (j == dev->data->nb_rx_queues)
		reta = (reta << CHAR_BIT) | j;
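		/* e.g. queues 0..3 accumulate as 0x00010203; after the byte
		 * swap noted above, RETA(0, 0) ends up holding 0x03020100
		 */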
			FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),

	/*
	 * Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
	mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;

		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"

	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);

fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	for (i = 0; i < nb_lport_new; i++) {
		/* Set unicast mode by default. Applications can switch to
		 * another mode via other API calls.
		 */
		hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
			FM10K_XCAST_MODE_NONE);
		fm10k_mbx_unlock(hw);

fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_vmdq_rx_conf *vmdq_conf;
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct fm10k_macvlan_filter_info *macvlan;
	uint16_t nb_queue_pools = 0; /* pool number in configuration */
	uint16_t nb_lport_new;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	fm10k_dev_rss_configure(dev);

	/* only PF supports VMDQ */
	if (hw->mac.type != fm10k_mac_pf)

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
		nb_queue_pools = vmdq_conf->nb_queue_pools;

	/* no pool number change, no need to update logic port and VLAN/MAC */
	if (macvlan->nb_queue_pools == nb_queue_pools)

	nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
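	/* with VMDQ off, the PF main VSI still needs one logical port */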
	fm10k_dev_logic_port_update(dev, nb_lport_new);

	/* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
	memset(dev->data->mac_addrs, 0,
		ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
		&dev->data->mac_addrs[0]);
	memset(macvlan, 0, sizeof(*macvlan));
	macvlan->nb_queue_pools = nb_queue_pools;

	if (nb_queue_pools)
		fm10k_dev_vmdq_rx_configure(dev);
	else
		fm10k_dev_pf_main_vsi_reset(dev);

fm10k_dev_tx_init(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_tx_queue *txq;

	/* Disable TXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_TXINT(i),
				3 << FM10K_TXINT_TIMER_SHIFT);

	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
		txq = dev->data->tx_queues[i];
		base_addr = txq->hw_ring_phys_addr;
		size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

		/* disable queue to avoid issues while updating state */
		ret = tx_queue_disable(hw, i);
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);

		/* Enable use of FTAG bit in TX descriptor, PFVTCTL
		 * register is read-only for VF.
		 */
		if (fm10k_check_ftag(dev->device->devargs)) {
			if (hw->mac.type == fm10k_mac_pf) {
				FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
					FM10K_PFVTCTL_FTAG_DESC_ENABLE);
				PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
				PMD_INIT_LOG(ERR, "VF FTAG is not supported.");

		/* set location and size for descriptor ring */
		FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);

		/* assign default SGLORT for each TX queue by PF */
		if (hw->mac.type == fm10k_mac_pf)
			FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);

	/* set up vector or scalar TX function as appropriate */
	fm10k_set_tx_function(dev);

fm10k_dev_rx_init(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_macvlan_filter_info *macvlan;
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
	struct fm10k_rx_queue *rxq;
	uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	uint32_t logic_port = hw->mac.dglort_map;
	uint16_t queue_stride = 0;

	/* enable RXINT for interrupt mode */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (; i < dev->data->nb_rx_queues; i++) {
			FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
			if (hw->mac.type == fm10k_mac_pf)
				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
					FM10K_ITR_MASK_CLEAR);
			else
				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
					FM10K_ITR_MASK_CLEAR);

	/* Disable other RXINT to avoid possible interrupt */
	for (; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_RXINT(i),
			3 << FM10K_RXINT_TIMER_SHIFT);

	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		rxq = dev->data->rx_queues[i];
		base_addr = rxq->hw_ring_phys_addr;
		size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

		/* disable queue to avoid issues while updating state */
		ret = rx_queue_disable(hw, i);
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);

		/* Setup the Base and Length of the Rx Descriptor Ring */
		FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

		/* Configure the Rx buffer size for one buff without split */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
			RTE_PKTMBUF_HEADROOM);
		/* As RX buffer is aligned to 512B within mbuf, some bytes are
		 * reserved for this purpose, and the worst case could be 511B.
		 * But SRR reg assumes all buffers have the same size. In order
		 * to fill the gap, we'll have to consider the worst case and
		 * assume 512B is reserved. If we don't do so, it's possible
		 * for HW to overwrite data to next mbuf.
		 */
		buf_size -= FM10K_RX_DATABUF_ALIGN;
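		/* e.g. assuming the common 2176B mbuf data room and 128B
		 * headroom: 2176 - 128 - 512 = 1536B advertised per buffer
		 */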

		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
				(buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
				FM10K_SRRCTL_LOOPBACK_SUPPRESS);

		/* Enable scattered RX if the maximum packet size, plus room
		 * for two VLAN tags, does not fit in a single buffer
		 */
		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
			2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
			dev->data->dev_conf.rxmode.enable_scatter) {
			dev->data->scattered_rx = 1;
			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
			reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
			FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);

		/* Enable drop on empty, it's RO for VF */
		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
		FM10K_WRITE_FLUSH(hw);

	/* Configure VMDQ/RSS if applicable */
	fm10k_dev_mq_rx_configure(dev);

	/* Decide the best RX function */
	fm10k_set_rx_function(dev);

	/* update RX_SGLORT for loopback suppression */
	if (hw->mac.type != fm10k_mac_pf)
	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	if (macvlan->nb_queue_pools)
		queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		if (i && queue_stride && !(i % queue_stride))
		FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);

fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];
		err = rx_queue_reset(rxq);
		if (err == -ENOMEM) {
			PMD_INIT_LOG(ERR, "Failed to alloc memory: %d", err);
		} else if (err == -EINVAL) {
			PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled on real
		 * hardware, but BEFORE the queue is enabled when using the
		 * emulation platform. Do it in both places for now and remove
		 * this comment and the following two register writes when the
		 * emulation platform is no longer being used.
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

		/* Set PF ownership flag for PF devices */
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
		if (hw->mac.type == fm10k_mac_pf)
			reg |= FM10K_RXQCTL_PF;
		reg |= FM10K_RXQCTL_ENABLE;
		/* enable RX queue */
		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
		FM10K_WRITE_FLUSH(hw);

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		/* Disable RX queue */
		rx_queue_disable(hw, rx_queue_id);

		/* Free mbuf and clean HW ring */
		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

		/* reset head and tail pointers */
		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

		/* enable TX queue */
		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
				FM10K_TXDCTL_ENABLE | txdctl);
		FM10K_WRITE_FLUSH(hw);
		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_disable(hw, tx_queue_id);
		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

static inline int fm10k_glort_valid(struct fm10k_hw *hw)
	return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
		!= FM10K_DGLORTMAP_NONE);

fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range has not been acquired yet */
	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))

	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_PROMISC);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");

fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range has not been acquired yet */
	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))

	if (dev->data->all_multicast == 1)
		mode = FM10K_XCAST_MODE_ALLMULTI;
	else
		mode = FM10K_XCAST_MODE_NONE;

	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");

fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range has not been acquired yet */
	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))

	/* If promiscuous mode is enabled, it doesn't make sense to enable
	 * allmulticast and disable promiscuous since fm10k can only select
	 * one of them.
	 */
	if (dev->data->promiscuous) {
		PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
			"no need to enable allmulticast");

	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_ALLMULTI);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");

fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range has not been acquired yet */
	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))

	if (dev->data->promiscuous) {
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
			"since promiscuous mode is enabled");

	/* Change mode to unicast mode */
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_NONE);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");

fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
	uint16_t nb_queue_pools;
	struct fm10k_macvlan_filter_info *macvlan;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	nb_queue_pools = macvlan->nb_queue_pools;
	pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
	rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;

	/* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
	dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
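	/* e.g. 4 pools across 8 RX queues: pool_len = fls(3) = 2 and
	 * rss_len = fls(7) - 2 = 1, i.e. 2 pool bits and 1 RSS bit
	 */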
	dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
	/* Configure VMDQ/RSS DGlort Decoder */
	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

	/* Flow Director configurations, only queue number is valid. */
	dglortdec = fls(dev->data->nb_rx_queues - 1);
	dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
			(hw->mac.dglort_map + GLORT_FD_Q_BASE);
	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);

	/* Invalidate all other GLORT entries */
	for (i = 2; i < FM10K_DGLORT_COUNT; i++)
		FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
				FM10K_DGLORTMAP_NONE);

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
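/* round-up mask: adding this before shifting right by
 * FM10K_SRRCTL_BSIZEPKT_SHIFT rounds a byte count up to a whole HW unit
 */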

fm10k_dev_start(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* stop, init, then start the hw */
	diag = fm10k_stop_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);

	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);

	diag = fm10k_start_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);

	diag = fm10k_dev_tx_init(dev);
		PMD_INIT_LOG(ERR, "TX init failed: %d", diag);

	if (fm10k_dev_rxq_interrupt_setup(dev))

	diag = fm10k_dev_rx_init(dev);
		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);

	if (hw->mac.type == fm10k_mac_pf)
		fm10k_dev_dglort_map_configure(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq;
		rxq = dev->data->rx_queues[i];

		if (rxq->rx_deferred_start)
		diag = fm10k_dev_rx_queue_start(dev, i);
			for (j = 0; j < i; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct fm10k_tx_queue *txq;
		txq = dev->data->tx_queues[i];

		if (txq->tx_deferred_start)
		diag = fm10k_dev_tx_queue_start(dev, i);
			for (j = 0; j < i; ++j)
				tx_queue_clean(dev->data->tx_queues[j]);
			for (j = 0; j < dev->data->nb_rx_queues; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);

	/* Update default vlan when not in VMDQ mode */
	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

	fm10k_link_update(dev, 0);

fm10k_dev_stop(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->tx_queues)
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			fm10k_dev_tx_queue_stop(dev, i);

	if (dev->data->rx_queues)
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			fm10k_dev_rx_queue_stop(dev, i);

	/* Disable datapath event */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			FM10K_WRITE_REG(hw, FM10K_RXINT(i),
				3 << FM10K_RXINT_TIMER_SHIFT);
			if (hw->mac.type == fm10k_mac_pf)
				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
					FM10K_ITR_MASK_SET);
			else
				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
					FM10K_ITR_MASK_SET);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_free(intr_handle->intr_vec);
	intr_handle->intr_vec = NULL;

fm10k_dev_queue_release(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();

	if (dev->data->tx_queues) {
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

	if (dev->data->rx_queues) {
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			fm10k_rx_queue_release(dev->data->rx_queues[i]);

fm10k_dev_close(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
		MAX_LPORT_NUM, false);
	fm10k_mbx_unlock(hw);

	/* allow 100ms for device to quiesce */
	rte_delay_us(FM10K_SWITCH_QUIESCE_US);

	/* Stop mailbox service first */
	fm10k_close_mbx_service(hw);
	fm10k_dev_stop(dev);
	fm10k_dev_queue_release(dev);

fm10k_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	PMD_INIT_FUNC_TRACE();

	/* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
	 * leave the speed undefined since there is no 50Gbps Ethernet.
	 */
	dev->data->dev_link.link_speed = 0;
	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	dev->data->dev_link.link_status =
		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;

static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
	if (xstats_names != NULL) {
		/* Note: limit checked in rte_eth_xstats_names() */

		for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
			snprintf(xstats_names[count].name,
				sizeof(xstats_names[count].name),
				"%s", fm10k_hw_stats_strings[count].name);

		/* PF queue stats */
		for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
			for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					fm10k_hw_stats_rx_q_strings[i].name);
			for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					fm10k_hw_stats_tx_q_strings[i].name);

	return FM10K_NB_XSTATS;

fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	unsigned i, q, count = 0;

	if (n < FM10K_NB_XSTATS)
		return FM10K_NB_XSTATS;

	for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
			fm10k_hw_stats_strings[count].offset);
		xstats[count].id = count;

	/* PF queue stats */
	for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
		for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
			xstats[count].value =
				*(uint64_t *)(((char *)&hw_stats->q[q]) +
				fm10k_hw_stats_rx_q_strings[i].offset);
			xstats[count].id = count;
		for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
			xstats[count].value =
				*(uint64_t *)(((char *)&hw_stats->q[q]) +
				fm10k_hw_stats_tx_q_strings[i].offset);
			xstats[count].id = count;

	return FM10K_NB_XSTATS;

fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	uint64_t ipackets, opackets, ibytes, obytes;
	struct fm10k_hw *hw =
		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	fm10k_update_hw_stats(hw, hw_stats);

	ipackets = opackets = ibytes = obytes = 0;
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < hw->mac.max_queues); ++i) {
		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
		stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
		stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
		ipackets += stats->q_ipackets[i];
		opackets += stats->q_opackets[i];
		ibytes += stats->q_ibytes[i];
		obytes += stats->q_obytes[i];

	stats->ipackets = ipackets;
	stats->opackets = opackets;
	stats->ibytes = ibytes;
	stats->obytes = obytes;

fm10k_stats_reset(struct rte_eth_dev *dev)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	memset(hw_stats, 0, sizeof(*hw_stats));
	fm10k_rebind_hw_stats(hw, hw_stats);

fm10k_dev_infos_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_INIT_FUNC_TRACE();

	dev_info->pci_dev = pdev;
	dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
	dev_info->max_rx_queues = hw->mac.max_queues;
	dev_info->max_tx_queues = hw->mac.max_queues;
	dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = pdev->max_vfs;
	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
	dev_info->max_vmdq_pools = ETH_32_POOLS;
	dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
	dev_info->reta_size = FM10K_MAX_RSS_INDICES;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = FM10K_DEFAULT_RX_PTHRESH,
			.hthresh = FM10K_DEFAULT_RX_HTHRESH,
			.wthresh = FM10K_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = FM10K_DEFAULT_TX_PTHRESH,
			.hthresh = FM10K_DEFAULT_TX_HTHRESH,
			.wthresh = FM10K_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
		.txq_flags = FM10K_SIMPLE_TX_FLAG,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = FM10K_MAX_RX_DESC,
		.nb_min = FM10K_MIN_RX_DESC,
		.nb_align = FM10K_MULT_RX_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = FM10K_MAX_TX_DESC,
		.nb_min = FM10K_MIN_TX_DESC,
		.nb_align = FM10K_MULT_TX_DESC,
		.nb_seg_max = FM10K_TX_MAX_SEG,
		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;

#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
static const uint32_t *
fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
	if (dev->rx_pkt_burst == fm10k_recv_pkts ||
		dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
		static uint32_t ptypes[] = {
			/* refers to rx_desc_to_ol_flags() */
			RTE_PTYPE_L3_IPV4_EXT,
			RTE_PTYPE_L3_IPV6_EXT,
	} else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
		dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
		static uint32_t ptypes_vec[] = {
			/* refers to fm10k_desc_to_pktype_v() */
			RTE_PTYPE_L3_IPV4_EXT,
			RTE_PTYPE_L3_IPV6_EXT,
			RTE_PTYPE_TUNNEL_GENEVE,
			RTE_PTYPE_TUNNEL_NVGRE,
			RTE_PTYPE_TUNNEL_VXLAN,
			RTE_PTYPE_TUNNEL_GRE,
#else
static const uint32_t *
fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)

fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
	uint16_t mac_num = 0;
	uint32_t vid_idx, vid_bit, mac_index;
	struct fm10k_hw *hw;
	struct fm10k_macvlan_filter_info *macvlan;
	struct rte_eth_dev_data *data = dev->data;

	hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

	if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
		PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");

	if (vlan_id > ETH_VLAN_ID_MAX) {
		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");

	vid_idx = FM10K_VFTA_IDX(vlan_id);
	vid_bit = FM10K_VFTA_BIT(vlan_id);
	/* this VLAN ID is already in the VLAN filter table, return SUCCESS */
	if (on && (macvlan->vfta[vid_idx] & vid_bit))
	/* this VLAN ID is NOT in the VLAN filter table, cannot remove */
	if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
		PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
			"in the VLAN filter table");

	result = fm10k_update_vlan(hw, vlan_id, 0, on);
	fm10k_mbx_unlock(hw);
	if (result != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);

	for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
		(result == FM10K_SUCCESS); mac_index++) {
		if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
		if (mac_num > macvlan->mac_num - 1) {
			PMD_INIT_LOG(ERR, "MAC address number "
		result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
			data->mac_addrs[mac_index].addr_bytes,
		fm10k_mbx_unlock(hw);

	if (result != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);

	if (on) {
		macvlan->vlan_num++;
		macvlan->vfta[vid_idx] |= vid_bit;
	} else {
		macvlan->vlan_num--;
		macvlan->vfta[vid_idx] &= ~vid_bit;
	}

fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
			PMD_INIT_LOG(ERR, "VLAN stripping is "
				"always on in fm10k");

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			PMD_INIT_LOG(ERR, "VLAN QinQ is not "
				"supported in fm10k");

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
			PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");

/* Add/Remove a MAC address, and update filters to main VSI */
static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
	const u8 *mac, bool add, uint32_t pool)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_macvlan_filter_info *macvlan;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

	if (pool != MAIN_VSI_POOL_NUMBER) {
		PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
			"mac to pool %u", pool);
	for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
		if (!macvlan->vfta[j])
		for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
			if (!(macvlan->vfta[j] & (1 << k)))
			if (i + 1 > macvlan->vlan_num) {
				PMD_INIT_LOG(ERR, "VLAN number does not match");
			fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
				j * FM10K_UINT32_BIT_SIZE + k, add, 0);
			fm10k_mbx_unlock(hw);

/* Add/Remove a MAC address, and update filters to VMDQ */
static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
	const u8 *mac, bool add, uint32_t pool)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_macvlan_filter_info *macvlan;
	struct rte_eth_vmdq_rx_conf *vmdq_conf;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	if (pool > macvlan->nb_queue_pools) {
		PMD_DRV_LOG(ERR, "Pool number %u invalid."
			" Max pool is %u",
			pool, macvlan->nb_queue_pools);
	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
		if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
		fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
			vmdq_conf->pool_map[i].vlan_id, add, 0);
		fm10k_mbx_unlock(hw);

/* Add/Remove a MAC address, and update filters */
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
	const u8 *mac, bool add, uint32_t pool)
	struct fm10k_macvlan_filter_info *macvlan;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

	if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
		fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
	else
		fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);

/* Add a MAC address, and update filters */
fm10k_macaddr_add(struct rte_eth_dev *dev,
	struct ether_addr *mac_addr,
	struct fm10k_macvlan_filter_info *macvlan;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
	macvlan->mac_vmdq_id[index] = pool;

/* Remove a MAC address, and update filters */
fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
	struct rte_eth_dev_data *data = dev->data;
	struct fm10k_macvlan_filter_info *macvlan;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
		FALSE, macvlan->mac_vmdq_id[index]);
	macvlan->mac_vmdq_id[index] = 0;
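
/* Parameter validation helpers: a descriptor count must lie in [min, max]
 * and be a multiple of mult; a threshold must lie in [min, max] and evenly
 * divide div (note the test is div % request, not the other way around).
 */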
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
	if ((request < min) || (request > max) || ((request % mult) != 0))

check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
	if ((request < min) || (request > max) || ((div % request) != 0))

handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
	uint16_t rx_free_thresh;

	if (conf->rx_free_thresh == 0)
		rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
	else
		rx_free_thresh = conf->rx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_DIV(q),
			rx_free_thresh)) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_DIV(q));

	q->alloc_thresh = rx_free_thresh;
	q->drop_en = conf->rx_drop_en;
	q->rx_deferred_start = conf->rx_deferred_start;

/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
mempool_element_size_valid(struct rte_mempool *mp)
	/* elt_size includes mbuf header and headroom */
	min_size = mp->elt_size - sizeof(struct rte_mbuf) -
			RTE_PKTMBUF_HEADROOM;

	/* account for up to 512B of alignment */
	min_size -= FM10K_RX_DATABUF_ALIGN;
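	/* min_size is unsigned, so if the subtractions above went below zero
	 * it wraps to a huge value, which the overflow check below catches
	 */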

	/* sanity check for overflow */
	if (min_size > mp->elt_size)

fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	struct fm10k_rx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure the mempool element size can account for alignment. */
	if (!mempool_element_size_valid(mp)) {
		PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
			FM10K_MULT_RX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
			FM10K_MULT_RX_DESC);

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * a different socket than was previously used.
	 */
	if (dev->data->rx_queues[queue_id] != NULL) {
		rx_queue_free(dev->data->rx_queues[queue_id]);
		dev->data->rx_queues[queue_id] = NULL;

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");

	q->nb_desc = nb_desc;
	q->nb_fake_desc = FM10K_MULT_RX_DESC;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
	if (handle_rxconf(q, conf))

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
		(nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
			FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->iova;

	/* Check whether the number of descriptors satisfies the vector PMD
	 * requirement
	 */
	if (!rte_is_power_of_2(nb_desc)) {
		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
			"preconditions - canceling the feature for "
			"the whole port[%d]",
			q->queue_id, q->port_id);
		dev_info->rx_vec_allowed = false;
	} else
		fm10k_rxq_vec_setup(q);

	dev->data->rx_queues[queue_id] = q;

fm10k_rx_queue_release(void *queue)
	PMD_INIT_FUNC_TRACE();

	rx_queue_free(queue);

handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
	uint16_t tx_free_thresh;
	uint16_t tx_rs_thresh;

	/* the constraint macros require tx_free_thresh to be configured
	 * before tx_rs_thresh
	 */
	if (conf->tx_free_thresh == 0)
		tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
	else
		tx_free_thresh = conf->tx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_DIV(q),
			tx_free_thresh)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_DIV(q));

	q->free_thresh = tx_free_thresh;

	if (conf->tx_rs_thresh == 0)
		tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
	else
		tx_rs_thresh = conf->tx_rs_thresh;

	q->tx_deferred_start = conf->tx_deferred_start;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_DIV(q),
			tx_rs_thresh)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_DIV(q));

	q->rs_thresh = tx_rs_thresh;

fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_txconf *conf)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_tx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
			FM10K_MULT_TX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
			FM10K_MULT_TX_DESC);

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * a different socket than was previously used.
	 */
	if (dev->data->tx_queues[queue_id] != NULL) {
		struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];

		dev->data->tx_queues[queue_id] = NULL;

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");

	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->txq_flags = conf->txq_flags;
	q->ops = &def_txq_ops;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
	if (handle_txconf(q, conf))

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
		nb_desc * sizeof(struct rte_mbuf *),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
			FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->iova;

	/*
	 * allocate memory for the RS bit tracker. Enough slots to hold the
	 * descriptor index for each RS bit needing to be set are required.
	 */
	q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
		((nb_desc + 1) / q->rs_thresh) *
		RTE_CACHE_LINE_SIZE, socket_id);
	if (q->rs_tracker.list == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
		rte_free(q->sw_ring);

	dev->data->tx_queues[queue_id] = q;

fm10k_tx_queue_release(void *queue)
	struct fm10k_tx_queue *q = queue;
	PMD_INIT_FUNC_TRACE();

fm10k_reta_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size > FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
			"(%d) doesn't match what the hardware can support "
			"(%d)", reta_size, FM10K_MAX_RSS_INDICES);

	/*
	 * Update Redirection Table RETA[n], n=0..31. The redirection table has
	 * 128 entries in 32 registers.
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
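		/* on a partial mask, read the current register first so the
		 * untouched bytes survive the read-modify-write below
		 */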
		if (mask != BIT_MASK_PER_UINT32)
			reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));

		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j)) {
				reta &= ~(UINT8_MAX << CHAR_BIT * j);
				reta |= reta_conf[idx].reta[shift + j] <<
		FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);

fm10k_reta_query(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size < FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
			"(%d) doesn't match what the hardware can support "
			"(%d)", reta_size, FM10K_MAX_RSS_INDICES);

	/*
	 * Read Redirection Table RETA[n], n=0..31. The redirection table has
	 * 128 entries in 32 registers.
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);

		reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] = ((reta >>
					CHAR_BIT * j) & UINT8_MAX);

fm10k_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;

	PMD_INIT_FUNC_TRACE();

	if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
		FM10K_RSSRK_ENTRIES_PER_REG))

	mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;

	/* If the mapping doesn't fit any supported hash type, return */

	for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
		FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);

fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *key = (uint32_t *)rss_conf->rss_key;

	PMD_INIT_FUNC_TRACE();

	if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
		FM10K_RSSRK_ENTRIES_PER_REG))

	for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
		key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));

	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));

	hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;

	rss_conf->rss_hf = hf;
2231 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2233 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2234 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2236 /* Bind all local non-queue interrupts to vector 0 */
2237 int_map |= FM10K_MISC_VEC_ID;
2239 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2240 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2241 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2242 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2243 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2244 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2246 /* Enable misc causes */
2247 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2248 FM10K_EIMR_ENABLE(THI_FAULT) |
2249 FM10K_EIMR_ENABLE(FUM_FAULT) |
2250 FM10K_EIMR_ENABLE(MAILBOX) |
2251 FM10K_EIMR_ENABLE(SWITCHREADY) |
2252 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2253 FM10K_EIMR_ENABLE(SRAMERROR) |
2254 FM10K_EIMR_ENABLE(VFLR));
2257 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2258 FM10K_ITR_MASK_CLEAR);
2259 FM10K_WRITE_FLUSH(hw);
2263 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2265 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2266 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2268 int_map |= FM10K_MISC_VEC_ID;
2270 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2271 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2272 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2273 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2274 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2275 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2277 /* Disable misc causes */
2278 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2279 FM10K_EIMR_DISABLE(THI_FAULT) |
2280 FM10K_EIMR_DISABLE(FUM_FAULT) |
2281 FM10K_EIMR_DISABLE(MAILBOX) |
2282 FM10K_EIMR_DISABLE(SWITCHREADY) |
2283 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2284 FM10K_EIMR_DISABLE(SRAMERROR) |
2285 FM10K_EIMR_DISABLE(VFLR));
2288 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2289 FM10K_WRITE_FLUSH(hw);
2293 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2295 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2296 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2298 /* Bind all local non-queue interrupts to vector 0 */
2299 int_map |= FM10K_MISC_VEC_ID;
2301 /* Only INT 0 is available; the other 15 are reserved. */
2302 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2305 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2306 FM10K_ITR_MASK_CLEAR);
2307 FM10K_WRITE_FLUSH(hw);
2311 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2313 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2314 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2316 int_map |= FM10K_MISC_VEC_ID;
2318 /* Only INT 0 is available; the other 15 are reserved. */
2319 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2322 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2323 FM10K_WRITE_FLUSH(hw);
2327 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2329 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2330 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2333 if (hw->mac.type == fm10k_mac_pf)
2334 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2335 FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2337 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2338 FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2339 rte_intr_enable(&pdev->intr_handle);
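/*
 * Note: Q2V(pdev, queue_id) merely indexes intr_handle.intr_vec[], the
 * per-queue vector table that fm10k_dev_rxq_interrupt_setup() fills in
 * below, so the ITR/VFITR register written here is the one bound to
 * this RX queue.
 */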
2344 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2346 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2347 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2350 if (hw->mac.type == fm10k_mac_pf)
2351 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2352 FM10K_ITR_MASK_SET);
2354 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2355 FM10K_ITR_MASK_SET);
2360 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2362 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2363 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2364 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2365 uint32_t intr_vector, vec;
2369 /* fm10k needs one separate interrupt vector for the mailbox,
2370 * so only drivers that support multiple interrupt vectors,
2371 * e.g. vfio-pci, can work in fm10k interrupt mode (see the sketch below)
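/*
 * Resulting vector layout, assuming the default 1:1 queue mapping built
 * below (sketch, not normative):
 *   vector 0                      -> mailbox and other misc causes
 *   vector FM10K_RX_VEC_START + n -> RX queue n
 * uio-based drivers expose a single vector only, hence the capability
 * check here.
 */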
2373 if (!rte_intr_cap_multiple(intr_handle) ||
2374 dev->data->dev_conf.intr_conf.rxq == 0)
2377 intr_vector = dev->data->nb_rx_queues;
2379 /* disable interrupt first */
2380 rte_intr_disable(intr_handle);
2381 if (hw->mac.type == fm10k_mac_pf)
2382 fm10k_dev_disable_intr_pf(dev);
2384 fm10k_dev_disable_intr_vf(dev);
2386 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2387 PMD_INIT_LOG(ERR, "Failed to init event fd");
2391 if (rte_intr_dp_is_en(intr_handle) && !result) {
2392 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2393 dev->data->nb_rx_queues * sizeof(int), 0);
2394 if (intr_handle->intr_vec) {
2395 for (queue_id = 0, vec = FM10K_RX_VEC_START;
2396 queue_id < dev->data->nb_rx_queues;
2398 intr_handle->intr_vec[queue_id] = vec;
2399 if (vec < intr_handle->nb_efd - 1
2400 + FM10K_RX_VEC_START)
2404 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2405 " intr_vec", dev->data->nb_rx_queues);
2406 rte_intr_efd_disable(intr_handle);
2411 if (hw->mac.type == fm10k_mac_pf)
2412 fm10k_dev_enable_intr_pf(dev);
2414 fm10k_dev_enable_intr_vf(dev);
2415 rte_intr_enable(intr_handle);
2416 hw->mac.ops.update_int_moderator(hw);
2421 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2423 struct fm10k_fault fault;
2425 const char *estr = "Unknown error";
2427 /* Process PCA fault */
2428 if (eicr & FM10K_EICR_PCA_FAULT) {
2429 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2432 switch (fault.type) {
2434 estr = "PCA_NO_FAULT"; break;
2435 case PCA_UNMAPPED_ADDR:
2436 estr = "PCA_UNMAPPED_ADDR"; break;
2437 case PCA_BAD_QACCESS_PF:
2438 estr = "PCA_BAD_QACCESS_PF"; break;
2439 case PCA_BAD_QACCESS_VF:
2440 estr = "PCA_BAD_QACCESS_VF"; break;
2441 case PCA_MALICIOUS_REQ:
2442 estr = "PCA_MALICIOUS_REQ"; break;
2443 case PCA_POISONED_TLP:
2444 estr = "PCA_POISONED_TLP"; break;
2446 estr = "PCA_TLP_ABORT"; break;
2450 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2451 estr, fault.func ? "VF" : "PF", fault.func,
2452 fault.address, fault.specinfo);
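/*
 * Example of the resulting log line (all values illustrative):
 *   PCA_UNMAPPED_ADDR: VF(3) Addr:0x1234abcd Spec: 0x0
 * fault.func == 0 denotes the PF, hence the "VF"/"PF" selector above.
 */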
2455 /* Process THI fault */
2456 if (eicr & FM10K_EICR_THI_FAULT) {
2457 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2460 switch (fault.type) {
2462 estr = "THI_NO_FAULT"; break;
2463 case THI_MAL_DIS_Q_FAULT:
2464 estr = "THI_MAL_DIS_Q_FAULT"; break;
2468 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2469 estr, fault.func ? "VF" : "PF", fault.func,
2470 fault.address, fault.specinfo);
2473 /* Process FUM fault */
2474 if (eicr & FM10K_EICR_FUM_FAULT) {
2475 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2478 switch (fault.type) {
2480 estr = "FUM_NO_FAULT"; break;
2481 case FUM_UNMAPPED_ADDR:
2482 estr = "FUM_UNMAPPED_ADDR"; break;
2483 case FUM_POISONED_TLP:
2484 estr = "FUM_POISONED_TLP"; break;
2485 case FUM_BAD_VF_QACCESS:
2486 estr = "FUM_BAD_VF_QACCESS"; break;
2487 case FUM_ADD_DECODE_ERR:
2488 estr = "FUM_ADD_DECODE_ERR"; break;
2490 estr = "FUM_RO_ERROR"; break;
2491 case FUM_QPRC_CRC_ERROR:
2492 estr = "FUM_QPRC_CRC_ERROR"; break;
2493 case FUM_CSR_TIMEOUT:
2494 estr = "FUM_CSR_TIMEOUT"; break;
2495 case FUM_INVALID_TYPE:
2496 estr = "FUM_INVALID_TYPE"; break;
2497 case FUM_INVALID_LENGTH:
2498 estr = "FUM_INVALID_LENGTH"; break;
2499 case FUM_INVALID_BE:
2500 estr = "FUM_INVALID_BE"; break;
2501 case FUM_INVALID_ALIGN:
2502 estr = "FUM_INVALID_ALIGN"; break;
2506 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2507 estr, fault.func ? "VF" : "PF", fault.func,
2508 fault.address, fault.specinfo);
2513 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2518 * PF interrupt handler triggered by the NIC to handle device-specific interrupts.
2521 * Pointer to the interrupt handle.
2523 * The address of the (struct rte_eth_dev *) parameter registered earlier.
2529 fm10k_dev_interrupt_handler_pf(void *param)
2531 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2532 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2533 uint32_t cause, status;
2534 struct fm10k_dev_info *dev_info =
2535 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2539 if (hw->mac.type != fm10k_mac_pf)
2542 cause = FM10K_READ_REG(hw, FM10K_EICR);
2544 /* Handle PCI fault cases */
2545 if (cause & FM10K_EICR_FAULT_MASK) {
2546 PMD_INIT_LOG(ERR, "INT: fault detected!");
2547 fm10k_dev_handle_fault(hw, cause);
2550 /* Handle switch up/down */
2551 if (cause & FM10K_EICR_SWITCHNOTREADY)
2552 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2554 if (cause & FM10K_EICR_SWITCHREADY) {
2555 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2556 if (dev_info->sm_down == 1) {
2559 /* For recreating logical ports */
2560 status_mbx = hw->mac.ops.update_lport_state(hw,
2561 hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2562 if (status_mbx == FM10K_SUCCESS)
2564 "INT: Recreated Logical port");
2567 "INT: Logical ports weren't recreated");
2569 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2570 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2571 if (status_mbx != FM10K_SUCCESS)
2572 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2574 fm10k_mbx_unlock(hw);
2576 /* first clear the internal SW recording structure */
2577 if (!(dev->data->dev_conf.rxmode.mq_mode &
2578 ETH_MQ_RX_VMDQ_FLAG))
2579 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2582 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2583 MAIN_VSI_POOL_NUMBER);
2586 * Add the default MAC address and VLAN for the logical
2587 * ports that have been created; leave it to the
2588 * application to fully recover Rx filtering.
2590 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2591 MAIN_VSI_POOL_NUMBER);
2593 if (!(dev->data->dev_conf.rxmode.mq_mode &
2594 ETH_MQ_RX_VMDQ_FLAG))
2595 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2598 dev_info->sm_down = 0;
2599 _rte_eth_dev_callback_process(dev,
2600 RTE_ETH_EVENT_INTR_LSC,
2605 /* Handle mailbox message */
2607 err = hw->mbx.ops.process(hw, &hw->mbx);
2608 fm10k_mbx_unlock(hw);
2610 if (err == FM10K_ERR_RESET_REQUESTED) {
2611 PMD_INIT_LOG(INFO, "INT: Switch is down");
2612 dev_info->sm_down = 1;
2613 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2617 /* Handle SRAM error */
2618 if (cause & FM10K_EICR_SRAMERROR) {
2619 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2621 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2622 /* Write to clear pending bits */
2623 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2625 /* TODO: print out the error message once the shared code is updated */
2628 /* Clear these 3 events if any are pending */
2629 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2630 FM10K_EICR_SWITCHREADY;
2632 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2634 /* Re-enable interrupt from device side */
2635 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2636 FM10K_ITR_MASK_CLEAR);
2637 /* Re-enable interrupt from host side */
2638 rte_intr_enable(dev->intr_handle);
2642 * VF interrupt handler triggered by the NIC to handle device-specific interrupts.
2645 * Pointer to the interrupt handle.
2647 * The address of the (struct rte_eth_dev *) parameter registered earlier.
2653 fm10k_dev_interrupt_handler_vf(void *param)
2655 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2656 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2657 struct fm10k_mbx_info *mbx = &hw->mbx;
2658 struct fm10k_dev_info *dev_info =
2659 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2660 const enum fm10k_mbx_state state = mbx->state;
2663 if (hw->mac.type != fm10k_mac_vf)
2666 /* Handle mailbox message if lock is acquired */
2668 hw->mbx.ops.process(hw, &hw->mbx);
2669 fm10k_mbx_unlock(hw);
2671 if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2672 PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2675 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2677 fm10k_mbx_unlock(hw);
2679 /* Setting reset flag */
2680 dev_info->sm_down = 1;
2681 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2685 if (dev_info->sm_down == 1 &&
2686 hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2687 PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2689 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2690 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2691 if (status_mbx != FM10K_SUCCESS)
2692 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2693 fm10k_mbx_unlock(hw);
2695 /* first clear the internal SW recording structure */
2696 fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2697 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2698 MAIN_VSI_POOL_NUMBER);
2701 * Add the default MAC address and VLAN for the logical ports that
2702 * have been created; leave it to the application to fully recover
2705 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2706 MAIN_VSI_POOL_NUMBER);
2707 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2709 dev_info->sm_down = 0;
2710 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2714 /* Re-enable interrupt from device side */
2715 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2716 FM10K_ITR_MASK_CLEAR);
2717 /* Re-enable interrupt from host side */
2718 rte_intr_enable(dev->intr_handle);
2721 /* Mailbox message handler in VF */
2722 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2723 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2724 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2725 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2726 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2730 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2734 /* Initialize mailbox lock */
2735 fm10k_mbx_initlock(hw);
2737 /* Replace default message handler with new ones */
2738 if (hw->mac.type == fm10k_mac_vf)
2739 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2742 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
2746 /* Connect to SM for PF device or PF for VF device */
2747 return hw->mbx.ops.connect(hw, &hw->mbx);
2751 fm10k_close_mbx_service(struct fm10k_hw *hw)
2753 /* Disconnect from SM for PF device or PF for VF device */
2754 hw->mbx.ops.disconnect(hw, &hw->mbx);
2757 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2758 .dev_configure = fm10k_dev_configure,
2759 .dev_start = fm10k_dev_start,
2760 .dev_stop = fm10k_dev_stop,
2761 .dev_close = fm10k_dev_close,
2762 .promiscuous_enable = fm10k_dev_promiscuous_enable,
2763 .promiscuous_disable = fm10k_dev_promiscuous_disable,
2764 .allmulticast_enable = fm10k_dev_allmulticast_enable,
2765 .allmulticast_disable = fm10k_dev_allmulticast_disable,
2766 .stats_get = fm10k_stats_get,
2767 .xstats_get = fm10k_xstats_get,
2768 .xstats_get_names = fm10k_xstats_get_names,
2769 .stats_reset = fm10k_stats_reset,
2770 .xstats_reset = fm10k_stats_reset,
2771 .link_update = fm10k_link_update,
2772 .dev_infos_get = fm10k_dev_infos_get,
2773 .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2774 .vlan_filter_set = fm10k_vlan_filter_set,
2775 .vlan_offload_set = fm10k_vlan_offload_set,
2776 .mac_addr_add = fm10k_macaddr_add,
2777 .mac_addr_remove = fm10k_macaddr_remove,
2778 .rx_queue_start = fm10k_dev_rx_queue_start,
2779 .rx_queue_stop = fm10k_dev_rx_queue_stop,
2780 .tx_queue_start = fm10k_dev_tx_queue_start,
2781 .tx_queue_stop = fm10k_dev_tx_queue_stop,
2782 .rx_queue_setup = fm10k_rx_queue_setup,
2783 .rx_queue_release = fm10k_rx_queue_release,
2784 .tx_queue_setup = fm10k_tx_queue_setup,
2785 .tx_queue_release = fm10k_tx_queue_release,
2786 .rx_descriptor_done = fm10k_dev_rx_descriptor_done,
2787 .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
2788 .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
2789 .reta_update = fm10k_reta_update,
2790 .reta_query = fm10k_reta_query,
2791 .rss_hash_update = fm10k_rss_hash_update,
2792 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
2795 static int ftag_check_handler(__rte_unused const char *key,
2796 const char *value, __rte_unused void *opaque)
2798 if (strcmp(value, "1"))
2805 fm10k_check_ftag(struct rte_devargs *devargs)
2807 struct rte_kvargs *kvlist;
2808 const char *ftag_key = "enable_ftag";
2810 if (devargs == NULL)
2813 kvlist = rte_kvargs_parse(devargs->args, NULL);
2817 if (!rte_kvargs_count(kvlist, ftag_key)) {
2818 rte_kvargs_free(kvlist);
2821 /* FTAG is enabled when the key-value pair enable_ftag=1 is present */
2822 if (rte_kvargs_process(kvlist, ftag_key,
2823 ftag_check_handler, NULL) < 0) {
2824 rte_kvargs_free(kvlist);
2827 rte_kvargs_free(kvlist);
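/*
 * Usage sketch (bus address hypothetical): FTAG support is requested per
 * device through EAL devargs, e.g.
 *   testpmd -w 84:00.0,enable_ftag=1 -- -i
 * Any value other than "1" is rejected by ftag_check_handler() above.
 */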
2833 fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
2837 struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
2842 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
2843 ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
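/*
 * Illustrative example of the chunking done by the (partially elided)
 * loop around this call: with txq->rs_thresh == 32, an 80-packet burst
 * reaches fm10k_xmit_fixed_burst_vec() as chunks of 32, 32 and 16; a
 * short return (ring full) ends the loop with the count sent so far.
 */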
2854 static void __attribute__((cold))
2855 fm10k_set_tx_function(struct rte_eth_dev *dev)
2857 struct fm10k_tx_queue *txq;
2860 uint16_t tx_ftag_en = 0;
2862 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2863 /* the primary process has already set the ftag flag and txq_flags */
2864 txq = dev->data->tx_queues[0];
2865 if (fm10k_tx_vec_condition_check(txq)) {
2866 dev->tx_pkt_burst = fm10k_xmit_pkts;
2867 dev->tx_pkt_prepare = fm10k_prep_pkts;
2868 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2870 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2871 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2872 dev->tx_pkt_prepare = NULL;
2877 if (fm10k_check_ftag(dev->device->devargs))
2880 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2881 txq = dev->data->tx_queues[i];
2882 txq->tx_ftag_en = tx_ftag_en;
2883 /* Check whether the vector Tx conditions are satisfied */
2884 if (fm10k_tx_vec_condition_check(txq))
2889 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2890 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2891 txq = dev->data->tx_queues[i];
2892 fm10k_txq_vec_setup(txq);
2894 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2895 dev->tx_pkt_prepare = NULL;
2897 dev->tx_pkt_burst = fm10k_xmit_pkts;
2898 dev->tx_pkt_prepare = fm10k_prep_pkts;
2899 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2903 static void __attribute__((cold))
2904 fm10k_set_rx_function(struct rte_eth_dev *dev)
2906 struct fm10k_dev_info *dev_info =
2907 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2908 uint16_t i, rx_using_sse;
2909 uint16_t rx_ftag_en = 0;
2911 if (fm10k_check_ftag(dev->device->devargs))
2914 /* In order to allow Vector Rx, a few configuration
2915 * conditions must be met.
2917 if (!fm10k_rx_vec_condition_check(dev) &&
2918 dev_info->rx_vec_allowed && !rx_ftag_en) {
2919 if (dev->data->scattered_rx)
2920 dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2922 dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2923 } else if (dev->data->scattered_rx)
2924 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2926 dev->rx_pkt_burst = fm10k_recv_pkts;
2929 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2930 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
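/*
 * Note (illustrative): the selection above prefers the vector path and
 * falls back to scattered, then regular Rx; e.g. rx_ftag_en == 1 alone
 * is enough to disqualify vector Rx even when the other conditions pass.
 */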
2933 PMD_INIT_LOG(DEBUG, "Use vector Rx func");
2935 PMD_INIT_LOG(DEBUG, "Use regular Rx func");
2937 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2940 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2941 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2943 rxq->rx_using_sse = rx_using_sse;
2944 rxq->rx_ftag_en = rx_ftag_en;
2949 fm10k_params_init(struct rte_eth_dev *dev)
2951 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2952 struct fm10k_dev_info *info =
2953 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2955 /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2956 * there is no way to get link status without reading BAR4. Until this
2957 * works, assume we have maximum bandwidth.
2958 * @todo - fix bus info
2960 hw->bus_caps.speed = fm10k_bus_speed_8000;
2961 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2962 hw->bus_caps.payload = fm10k_bus_payload_512;
2963 hw->bus.speed = fm10k_bus_speed_8000;
2964 hw->bus.width = fm10k_bus_width_pcie_x8;
2965 hw->bus.payload = fm10k_bus_payload_256;
2967 info->rx_vec_allowed = true;
2971 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2973 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2974 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2975 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2977 struct fm10k_macvlan_filter_info *macvlan;
2979 PMD_INIT_FUNC_TRACE();
2981 dev->dev_ops = &fm10k_eth_dev_ops;
2982 dev->rx_pkt_burst = &fm10k_recv_pkts;
2983 dev->tx_pkt_burst = &fm10k_xmit_pkts;
2984 dev->tx_pkt_prepare = &fm10k_prep_pkts;
2987 * The primary process does the whole initialization; secondary
2988 * processes just select the same Rx and Tx functions as the primary.
2990 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2991 fm10k_set_rx_function(dev);
2992 fm10k_set_tx_function(dev);
2996 rte_eth_copy_pci_info(dev, pdev);
2998 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2999 memset(macvlan, 0, sizeof(*macvlan));
3000 /* Vendor and Device ID need to be set before init of shared code */
3001 memset(hw, 0, sizeof(*hw));
3002 hw->device_id = pdev->id.device_id;
3003 hw->vendor_id = pdev->id.vendor_id;
3004 hw->subsystem_device_id = pdev->id.subsystem_device_id;
3005 hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
3006 hw->revision_id = 0;
3007 hw->hw_addr = (void *)pdev->mem_resource[0].addr;
3008 if (hw->hw_addr == NULL) {
3009 PMD_INIT_LOG(ERR, "Bad mem resource."
3010 " Try to blacklist unused devices.");
3014 /* Store fm10k_adapter pointer */
3015 hw->back = dev->data->dev_private;
3017 /* Initialize the shared code */
3018 diag = fm10k_init_shared_code(hw);
3019 if (diag != FM10K_SUCCESS) {
3020 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
3024 /* Initialize parameters */
3025 fm10k_params_init(dev);
3027 /* Initialize the hw */
3028 diag = fm10k_init_hw(hw);
3029 if (diag != FM10K_SUCCESS) {
3030 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
3034 /* Initialize MAC address(es) */
3035 dev->data->mac_addrs = rte_zmalloc("fm10k",
3036 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
3037 if (dev->data->mac_addrs == NULL) {
3038 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
3042 diag = fm10k_read_mac_addr(hw);
3044 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3045 &dev->data->mac_addrs[0]);
3047 if (diag != FM10K_SUCCESS ||
3048 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
3050 /* Generate a random addr */
3051 eth_random_addr(hw->mac.addr);
3052 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
3053 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3054 &dev->data->mac_addrs[0]);
3057 /* Reset the hw statistics */
3058 fm10k_stats_reset(dev);
3061 diag = fm10k_reset_hw(hw);
3062 if (diag != FM10K_SUCCESS) {
3063 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
3067 /* Setup mailbox service */
3068 diag = fm10k_setup_mbx_service(hw);
3069 if (diag != FM10K_SUCCESS) {
3070 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
3074 /* PF and VF have different interrupt handling mechanisms */
3075 if (hw->mac.type == fm10k_mac_pf) {
3076 /* register callback func to eal lib */
3077 rte_intr_callback_register(intr_handle,
3078 fm10k_dev_interrupt_handler_pf, (void *)dev);
3080 /* enable MISC interrupt */
3081 fm10k_dev_enable_intr_pf(dev);
3083 rte_intr_callback_register(intr_handle,
3084 fm10k_dev_interrupt_handler_vf, (void *)dev);
3086 fm10k_dev_enable_intr_vf(dev);
3089 /* Enable intr after callback registered */
3090 rte_intr_enable(intr_handle);
3092 hw->mac.ops.update_int_moderator(hw);
3094 /* Make sure Switch Manager is ready before going forward. */
3095 if (hw->mac.type == fm10k_mac_pf) {
3096 int switch_ready = 0;
3098 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3100 hw->mac.ops.get_host_state(hw, &switch_ready);
3101 fm10k_mbx_unlock(hw);
3104 /* Delay some time to acquire async LPORT_MAP info. */
3105 rte_delay_us(WAIT_SWITCH_MSG_US);
3108 if (switch_ready == 0) {
3109 PMD_INIT_LOG(ERR, "switch is not ready");
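/*
 * Worked numbers: the loop above polls up to
 * MAX_QUERY_SWITCH_STATE_TIMES (10) times with a WAIT_SWITCH_MSG_US
 * (100 ms) delay, so the PF gives the switch manager roughly one
 * second to come up before failing.
 */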
3115 * The calls below trigger mailbox operations, so take the lock to
3116 * avoid racing with the interrupt handler. An operation on the mailbox
3117 * FIFO raises an interrupt to the PF/SM, whose handler in turn raises
3118 * an interrupt back to our side, at which point the mailbox FIFO is
3119 * touched again. See the lock/unlock sketch below.
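/*
 * The locking pattern used throughout is therefore (sketch):
 *   fm10k_mbx_lock(hw);
 *   ... mac.ops calls that touch the mailbox ...
 *   fm10k_mbx_unlock(hw);
 */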
3122 /* Enable port first */
3123 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
3126 /* Set unicast mode by default; the app can switch to another mode later. */
3129 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
3130 FM10K_XCAST_MODE_NONE);
3132 fm10k_mbx_unlock(hw);
3134 /* Make sure default VID is ready before going forward. */
3135 if (hw->mac.type == fm10k_mac_pf) {
3136 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3137 if (hw->mac.default_vid)
3139 /* Delay some time to acquire async port VLAN info. */
3140 rte_delay_us(WAIT_SWITCH_MSG_US);
3143 if (!hw->mac.default_vid) {
3144 PMD_INIT_LOG(ERR, "default VID is not ready");
3149 /* Add default mac address */
3150 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
3151 MAIN_VSI_POOL_NUMBER);
3157 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
3159 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3160 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3161 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3162 PMD_INIT_FUNC_TRACE();
3164 /* only uninitialize in the primary process */
3165 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3168 /* safe to close dev here */
3169 fm10k_dev_close(dev);
3171 dev->dev_ops = NULL;
3172 dev->rx_pkt_burst = NULL;
3173 dev->tx_pkt_burst = NULL;
3175 /* disable uio/vfio intr */
3176 rte_intr_disable(intr_handle);
3178 /* PF and VF have different interrupt handling mechanisms */
3179 if (hw->mac.type == fm10k_mac_pf) {
3180 /* disable interrupt */
3181 fm10k_dev_disable_intr_pf(dev);
3183 /* unregister callback func to eal lib */
3184 rte_intr_callback_unregister(intr_handle,
3185 fm10k_dev_interrupt_handler_pf, (void *)dev);
3187 /* disable interrupt */
3188 fm10k_dev_disable_intr_vf(dev);
3190 rte_intr_callback_unregister(intr_handle,
3191 fm10k_dev_interrupt_handler_vf, (void *)dev);
3194 /* free mac memory */
3195 if (dev->data->mac_addrs) {
3196 rte_free(dev->data->mac_addrs);
3197 dev->data->mac_addrs = NULL;
3200 memset(hw, 0, sizeof(*hw));
3205 static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3206 struct rte_pci_device *pci_dev)
3208 return rte_eth_dev_pci_generic_probe(pci_dev,
3209 sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
3212 static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
3214 return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
3218 * The set of PCI devices this driver supports. This driver will enable both PF
3219 * and SRIOV-VF devices.
3221 static const struct rte_pci_id pci_id_fm10k_map[] = {
3222 { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
3223 { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
3224 { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
3225 { .vendor_id = 0, /* sentinel */ },
3228 static struct rte_pci_driver rte_pmd_fm10k = {
3229 .id_table = pci_id_fm10k_map,
3230 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3231 RTE_PCI_DRV_IOVA_AS_VA,
3232 .probe = eth_fm10k_pci_probe,
3233 .remove = eth_fm10k_pci_remove,
3236 RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
3237 RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
3238 RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
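/*
 * Binding sketch (device address hypothetical): the device must first be
 * bound to one of the kernel modules listed above, e.g.
 *   dpdk-devbind.py --bind=vfio-pci 0000:84:00.0
 */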
3240 RTE_INIT(fm10k_init_log);
3242 fm10k_init_log(void)
3244 fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
3245 if (fm10k_logtype_init >= 0)
3246 rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
3247 fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
3248 if (fm10k_logtype_driver >= 0)
3249 rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);