1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2013-2016 Intel Corporation
5 #include <rte_ethdev_driver.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_malloc.h>
8 #include <rte_memzone.h>
9 #include <rte_string_fns.h>
11 #include <rte_spinlock.h>
12 #include <rte_kvargs.h>
15 #include "base/fm10k_api.h"
17 /* Default delay to acquire mailbox lock */
18 #define FM10K_MBXLOCK_DELAY_US 20
19 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
21 #define MAIN_VSI_POOL_NUMBER 0
23 /* Max try times to acquire switch status */
24 #define MAX_QUERY_SWITCH_STATE_TIMES 10
25 /* Wait interval to get switch status */
26 #define WAIT_SWITCH_MSG_US 100000
27 /* A period of quiescence for switch */
28 #define FM10K_SWITCH_QUIESCE_US 100000
29 /* Number of chars per uint32 type */
30 #define CHARS_PER_UINT32 (sizeof(uint32_t))
31 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
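/* For reference (values derived from the definitions above, nothing new):
 * CHARS_PER_UINT32 evaluates to 4 and BIT_MASK_PER_UINT32 to 0xF, i.e. one
 * mask bit per byte-wide RETA entry packed into a 32-bit register; the RETA
 * update/query code below relies on exactly these values.
 */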
33 /* default 1:1 map from queue ID to interrupt vector ID */
34 #define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
36 /* First 64 logical ports for PF/VMDQ, second 64 for Flow Director */
37 #define MAX_LPORT_NUM 128
38 #define GLORT_FD_Q_BASE 0x40
39 #define GLORT_PF_MASK 0xFFC0
40 #define GLORT_FD_MASK GLORT_PF_MASK
41 #define GLORT_FD_INDEX GLORT_FD_Q_BASE
43 int fm10k_logtype_init;
44 int fm10k_logtype_driver;
46 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
47 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
48 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
49 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
50 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
51 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
53 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
54 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
55 const u8 *mac, bool add, uint32_t pool);
56 static void fm10k_tx_queue_release(void *queue);
57 static void fm10k_rx_queue_release(void *queue);
58 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
59 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
60 static int fm10k_check_ftag(struct rte_devargs *devargs);
61 static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
63 static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
64 struct rte_eth_dev_info *dev_info);
65 static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
66 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
67 static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
68 static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
70 struct fm10k_xstats_name_off {
71 char name[RTE_ETH_XSTATS_NAME_SIZE];
75 struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
76 {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
77 {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
78 {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
79 {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
80 {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
81 {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
82 {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
83 {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
87 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
88 sizeof(fm10k_hw_stats_strings[0]))
90 struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
91 {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
92 {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
93 {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
96 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
97 sizeof(fm10k_hw_stats_rx_q_strings[0]))
99 struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
100 {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
101 {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
104 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
105 sizeof(fm10k_hw_stats_tx_q_strings[0]))
107 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
108 (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
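/* Illustrative count only (the real value of FM10K_MAX_QUEUES_PF comes from
 * fm10k.h): with the eight device-level counters above plus 3 Rx and 2 Tx
 * per-queue counters, e.g. 128 PF queues would give
 * FM10K_NB_XSTATS = 8 + 128 * (3 + 2) = 648 entries.
 */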
110 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
113 fm10k_mbx_initlock(struct fm10k_hw *hw)
115 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
119 fm10k_mbx_lock(struct fm10k_hw *hw)
121 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
122 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
126 fm10k_mbx_unlock(struct fm10k_hw *hw)
128 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
131 /* Stubs needed for linkage when vPMD is disabled */
133 fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
140 __rte_unused void *rx_queue,
141 __rte_unused struct rte_mbuf **rx_pkts,
142 __rte_unused uint16_t nb_pkts)
148 fm10k_recv_scattered_pkts_vec(
149 __rte_unused void *rx_queue,
150 __rte_unused struct rte_mbuf **rx_pkts,
151 __rte_unused uint16_t nb_pkts)
157 fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
164 fm10k_rx_queue_release_mbufs_vec(
165 __rte_unused struct fm10k_rx_queue *rxq)
171 fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
177 fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
183 fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
184 __rte_unused struct rte_mbuf **tx_pkts,
185 __rte_unused uint16_t nb_pkts)
191 * reset queue to initial state, allocate software buffers used when starting
193 * return 0 on success
194 * return -ENOMEM if buffers cannot be allocated
195 * return -EINVAL if buffers do not satisfy alignment condition
198 rx_queue_reset(struct fm10k_rx_queue *q)
200 static const union fm10k_rx_desc zero = {{0} };
203 PMD_INIT_FUNC_TRACE();
205 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
209 for (i = 0; i < q->nb_desc; ++i) {
210 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
211 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
212 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
216 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
217 q->hw_ring[i].q.pkt_addr = dma_addr;
218 q->hw_ring[i].q.hdr_addr = dma_addr;
221 /* initialize extra software ring entries. Space for these extra
222 * entries is always allocated.
224 memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
225 for (i = 0; i < q->nb_fake_desc; ++i) {
226 q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
227 q->hw_ring[q->nb_desc + i] = zero;
232 q->next_trigger = q->alloc_thresh - 1;
233 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
234 q->rxrearm_start = 0;
241 * clean queue, descriptor rings, free software buffers used when stopping
245 rx_queue_clean(struct fm10k_rx_queue *q)
247 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
249 PMD_INIT_FUNC_TRACE();
251 /* zero descriptor rings */
252 for (i = 0; i < q->nb_desc; ++i)
253 q->hw_ring[i] = zero;
255 /* zero faked descriptors */
256 for (i = 0; i < q->nb_fake_desc; ++i)
257 q->hw_ring[q->nb_desc + i] = zero;
259 /* vPMD driver has a different way of releasing mbufs. */
260 if (q->rx_using_sse) {
261 fm10k_rx_queue_release_mbufs_vec(q);
265 /* free software buffers */
266 for (i = 0; i < q->nb_desc; ++i) {
268 rte_pktmbuf_free_seg(q->sw_ring[i]);
269 q->sw_ring[i] = NULL;
275 * free all queue memory used when releasing the queue (e.g. at configure time)
278 rx_queue_free(struct fm10k_rx_queue *q)
280 PMD_INIT_FUNC_TRACE();
282 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
285 rte_free(q->sw_ring);
294 * disable RX queue, wait until HW finishes the necessary flush operation
297 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
301 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
302 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
303 reg & ~FM10K_RXQCTL_ENABLE);
305 /* Wait 100us at most */
306 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
308 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
309 if (!(reg & FM10K_RXQCTL_ENABLE))
313 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
320 * reset queue to initial state, allocate software buffers used when starting
324 tx_queue_reset(struct fm10k_tx_queue *q)
326 PMD_INIT_FUNC_TRACE();
330 q->nb_free = q->nb_desc - 1;
331 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
332 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
336 * clean queue, descriptor rings, free software buffers used when stopping
340 tx_queue_clean(struct fm10k_tx_queue *q)
342 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
344 PMD_INIT_FUNC_TRACE();
346 /* zero descriptor rings */
347 for (i = 0; i < q->nb_desc; ++i)
348 q->hw_ring[i] = zero;
350 /* free software buffers */
351 for (i = 0; i < q->nb_desc; ++i) {
353 rte_pktmbuf_free_seg(q->sw_ring[i]);
354 q->sw_ring[i] = NULL;
360 * free all queue memory used when releasing the queue (e.g. at configure time)
363 tx_queue_free(struct fm10k_tx_queue *q)
365 PMD_INIT_FUNC_TRACE();
367 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
369 if (q->rs_tracker.list) {
370 rte_free(q->rs_tracker.list);
371 q->rs_tracker.list = NULL;
374 rte_free(q->sw_ring);
383 * disable TX queue, wait until HW finishes the necessary flush operation
386 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
390 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
391 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
392 reg & ~FM10K_TXDCTL_ENABLE);
394 /* Wait 100us at most */
395 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
397 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
398 if (!(reg & FM10K_TXDCTL_ENABLE))
402 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
409 fm10k_check_mq_mode(struct rte_eth_dev *dev)
411 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
412 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
413 struct rte_eth_vmdq_rx_conf *vmdq_conf;
414 uint16_t nb_rx_q = dev->data->nb_rx_queues;
416 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
418 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
419 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
423 if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
426 if (hw->mac.type == fm10k_mac_vf) {
427 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
431 /* Check VMDQ queue pool number */
432 if (vmdq_conf->nb_queue_pools >
433 sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
434 vmdq_conf->nb_queue_pools > nb_rx_q) {
435 PMD_INIT_LOG(ERR, "Too many of queue pools: %d",
436 vmdq_conf->nb_queue_pools);
443 static const struct fm10k_txq_ops def_txq_ops = {
444 .reset = tx_queue_reset,
448 fm10k_dev_configure(struct rte_eth_dev *dev)
452 PMD_INIT_FUNC_TRACE();
454 /* multiple queue mode checking */
455 ret = fm10k_check_mq_mode(dev);
457 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
462 dev->data->scattered_rx = 0;
467 /* fls = find last set bit = 32 minus the number of leading zeros */
469 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
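/* Illustrative values (plain arithmetic, no functional change): fls(0) = 0,
 * fls(1) = 1, fls(15) = 4, fls(16) = 5. It is used below to size the
 * pool/RSS bit-fields of the DGLORT decoder from the pool and queue counts.
 */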
473 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
475 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
476 struct rte_eth_vmdq_rx_conf *vmdq_conf;
479 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
481 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
482 if (!vmdq_conf->pool_map[i].pools)
485 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
486 fm10k_mbx_unlock(hw);
491 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
493 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
495 /* Add default mac address */
496 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
497 MAIN_VSI_POOL_NUMBER);
501 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
503 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
504 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
505 uint32_t mrqc, *key, i, reta, j;
508 #define RSS_KEY_SIZE 40
509 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
510 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
511 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
512 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
513 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
514 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
517 if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
518 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
519 FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
523 /* random key is rss_intel_key (default) or user provided (rss_key) */
524 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
525 key = (uint32_t *)rss_intel_key;
527 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
529 /* Now fill our hash function seeds, 4 bytes at a time */
530 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
531 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
534 * Fill in redirection table
535 * The byte-swap is needed because NIC registers are in
536 * little-endian order.
539 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
540 if (j == dev->data->nb_rx_queues)
542 reta = (reta << CHAR_BIT) | j;
544 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
549 * Generate RSS hash based on packet types, TCP/UDP
550 * port numbers and/or IPv4/v6 src and dst addresses
552 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
554 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
555 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
556 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
557 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
558 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
559 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
560 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
561 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
562 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
565 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
570 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
574 fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
576 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
579 for (i = 0; i < nb_lport_new; i++) {
580 /* Set unicast mode by default. The application can switch
581 * to another mode via other API functions.
584 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
585 FM10K_XCAST_MODE_NONE);
586 fm10k_mbx_unlock(hw);
591 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
593 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
594 struct rte_eth_vmdq_rx_conf *vmdq_conf;
595 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
596 struct fm10k_macvlan_filter_info *macvlan;
597 uint16_t nb_queue_pools = 0; /* pool number in configuration */
598 uint16_t nb_lport_new;
600 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
601 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
603 fm10k_dev_rss_configure(dev);
605 /* only PF supports VMDQ */
606 if (hw->mac.type != fm10k_mac_pf)
609 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
610 nb_queue_pools = vmdq_conf->nb_queue_pools;
612 /* no pool number change, no need to update logic port and VLAN/MAC */
613 if (macvlan->nb_queue_pools == nb_queue_pools)
616 nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
617 fm10k_dev_logic_port_update(dev, nb_lport_new);
619 /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
620 memset(dev->data->mac_addrs, 0,
621 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
622 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
623 &dev->data->mac_addrs[0]);
624 memset(macvlan, 0, sizeof(*macvlan));
625 macvlan->nb_queue_pools = nb_queue_pools;
628 fm10k_dev_vmdq_rx_configure(dev);
630 fm10k_dev_pf_main_vsi_reset(dev);
634 fm10k_dev_tx_init(struct rte_eth_dev *dev)
636 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
638 struct fm10k_tx_queue *txq;
642 /* Disable TXINT to avoid possible interrupt */
643 for (i = 0; i < hw->mac.max_queues; i++)
644 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
645 3 << FM10K_TXINT_TIMER_SHIFT);
648 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
649 txq = dev->data->tx_queues[i];
650 base_addr = txq->hw_ring_phys_addr;
651 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
653 /* disable queue to avoid issues while updating state */
654 ret = tx_queue_disable(hw, i);
656 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
659 /* Enable use of FTAG bit in TX descriptor, PFVTCTL
660 * register is read-only for VF.
662 if (fm10k_check_ftag(dev->device->devargs)) {
663 if (hw->mac.type == fm10k_mac_pf) {
664 FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
665 FM10K_PFVTCTL_FTAG_DESC_ENABLE);
666 PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
668 PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
673 /* set location and size for descriptor ring */
674 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
675 base_addr & UINT64_LOWER_32BITS_MASK);
676 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
677 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
678 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
680 /* assign default SGLORT for each TX queue by PF */
681 if (hw->mac.type == fm10k_mac_pf)
682 FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
685 /* set up vector or scalar TX function as appropriate */
686 fm10k_set_tx_function(dev);
692 fm10k_dev_rx_init(struct rte_eth_dev *dev)
694 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
695 struct fm10k_macvlan_filter_info *macvlan;
696 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
697 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
699 struct fm10k_rx_queue *rxq;
702 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
703 uint32_t logic_port = hw->mac.dglort_map;
705 uint16_t queue_stride = 0;
707 /* enable RXINT for interrupt mode */
709 if (rte_intr_dp_is_en(intr_handle)) {
710 for (; i < dev->data->nb_rx_queues; i++) {
711 FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
712 if (hw->mac.type == fm10k_mac_pf)
713 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
715 FM10K_ITR_MASK_CLEAR);
717 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
719 FM10K_ITR_MASK_CLEAR);
722 /* Disable other RXINT to avoid possible interrupt */
723 for (; i < hw->mac.max_queues; i++)
724 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
725 3 << FM10K_RXINT_TIMER_SHIFT);
727 /* Setup RX queues */
728 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
729 rxq = dev->data->rx_queues[i];
730 base_addr = rxq->hw_ring_phys_addr;
731 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
733 /* disable queue to avoid issues while updating state */
734 ret = rx_queue_disable(hw, i);
736 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
740 /* Setup the Base and Length of the Rx Descriptor Ring */
741 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
742 base_addr & UINT64_LOWER_32BITS_MASK);
743 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
744 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
745 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
747 /* Configure the Rx buffer size for one buff without split */
748 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
749 RTE_PKTMBUF_HEADROOM);
750 /* As RX buffer is aligned to 512B within mbuf, some bytes are
751 * reserved for this purpose, and the worst case could be 511B.
752 * But SRR reg assumes all buffers have the same size. In order
753 * to fill the gap, we'll have to consider the worst case and
754 * assume 512B is reserved. If we don't do so, it's possible
755 * for HW to overwrite data to next mbuf.
757 buf_size -= FM10K_RX_DATABUF_ALIGN;
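/* Worked example (assumes a typical pool created with
 * RTE_MBUF_DEFAULT_BUF_SIZE, i.e. 2176B of data room including the default
 * 128B RTE_PKTMBUF_HEADROOM; the real numbers depend on the application's
 * mempool): buf_size = 2176 - 128 - 512 = 1536, so a standard 1518B frame
 * plus two 4B VLAN tags (1526B) still fits in one buffer and the check
 * below does not enable scattered Rx.
 */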
759 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
760 (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
761 FM10K_SRRCTL_LOOPBACK_SUPPRESS);
763 /* Add the length of two VLAN tags to support dual VLAN (QinQ) */
764 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
765 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
766 rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
768 dev->data->scattered_rx = 1;
769 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
770 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
771 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
774 /* Enable drop on empty, it's RO for VF */
775 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
776 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
778 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
779 FM10K_WRITE_FLUSH(hw);
782 /* Configure VMDQ/RSS if applicable */
783 fm10k_dev_mq_rx_configure(dev);
785 /* Decide the best RX function */
786 fm10k_set_rx_function(dev);
788 /* update RX_SGLORT for loopback suppression */
789 if (hw->mac.type != fm10k_mac_pf)
791 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
792 if (macvlan->nb_queue_pools)
793 queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
794 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
795 if (i && queue_stride && !(i % queue_stride))
797 FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
804 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
806 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
809 struct fm10k_rx_queue *rxq;
811 PMD_INIT_FUNC_TRACE();
813 rxq = dev->data->rx_queues[rx_queue_id];
814 err = rx_queue_reset(rxq);
815 if (err == -ENOMEM) {
816 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
818 } else if (err == -EINVAL) {
819 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
824 /* Setup the HW Rx Head and Tail Descriptor Pointers
825 * Note: this must be done AFTER the queue is enabled on real
826 * hardware, but BEFORE the queue is enabled when using the
827 * emulation platform. Do it in both places for now and remove
828 * this comment and the following two register writes when the
829 * emulation platform is no longer being used.
831 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
832 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
834 /* Set PF ownership flag for PF devices */
835 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
836 if (hw->mac.type == fm10k_mac_pf)
837 reg |= FM10K_RXQCTL_PF;
838 reg |= FM10K_RXQCTL_ENABLE;
839 /* enable RX queue */
840 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
841 FM10K_WRITE_FLUSH(hw);
843 /* Setup the HW Rx Head and Tail Descriptor Pointers
844 * Note: this must be done AFTER the queue is enabled
846 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
847 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
848 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
854 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
856 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
858 PMD_INIT_FUNC_TRACE();
860 /* Disable RX queue */
861 rx_queue_disable(hw, rx_queue_id);
863 /* Free mbuf and clean HW ring */
864 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
865 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
871 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
873 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
874 /** @todo - this should be defined in the shared code */
875 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
876 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
877 struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
879 PMD_INIT_FUNC_TRACE();
883 /* reset head and tail pointers */
884 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
885 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
887 /* enable TX queue */
888 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
889 FM10K_TXDCTL_ENABLE | txdctl);
890 FM10K_WRITE_FLUSH(hw);
891 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
897 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
899 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
901 PMD_INIT_FUNC_TRACE();
903 tx_queue_disable(hw, tx_queue_id);
904 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
905 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
910 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
912 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
913 != FM10K_DGLORTMAP_NONE);
917 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
919 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
922 PMD_INIT_FUNC_TRACE();
924 /* Return if it didn't acquire valid glort range */
925 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
929 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
930 FM10K_XCAST_MODE_PROMISC);
931 fm10k_mbx_unlock(hw);
933 if (status != FM10K_SUCCESS)
934 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
938 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
940 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
944 PMD_INIT_FUNC_TRACE();
946 /* Return if it didn't acquire valid glort range */
947 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
950 if (dev->data->all_multicast == 1)
951 mode = FM10K_XCAST_MODE_ALLMULTI;
953 mode = FM10K_XCAST_MODE_NONE;
956 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
958 fm10k_mbx_unlock(hw);
960 if (status != FM10K_SUCCESS)
961 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
965 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
967 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
970 PMD_INIT_FUNC_TRACE();
972 /* Return if it didn't acquire valid glort range */
973 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
976 /* If promiscuous mode is enabled, it doesn't make sense to enable
977 * allmulticast and disable promiscuous since fm10k can only select
980 if (dev->data->promiscuous) {
981 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
982 "needn't enable allmulticast");
987 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
988 FM10K_XCAST_MODE_ALLMULTI);
989 fm10k_mbx_unlock(hw);
991 if (status != FM10K_SUCCESS)
992 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
996 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
998 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1001 PMD_INIT_FUNC_TRACE();
1003 /* Return if it didn't acquire valid glort range */
1004 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1007 if (dev->data->promiscuous) {
1008 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
1009 "since promisc mode is enabled");
1014 /* Change mode to unicast mode */
1015 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1016 FM10K_XCAST_MODE_NONE);
1017 fm10k_mbx_unlock(hw);
1019 if (status != FM10K_SUCCESS)
1020 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
1024 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
1026 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1027 uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
1028 uint16_t nb_queue_pools;
1029 struct fm10k_macvlan_filter_info *macvlan;
1031 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1032 nb_queue_pools = macvlan->nb_queue_pools;
1033 pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
1034 rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
1036 /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
1037 dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
1038 dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1040 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1041 /* Configure VMDQ/RSS DGlort Decoder */
1042 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
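/* Worked example of the decoder sizing above (illustrative only): with
 * 4 VMDQ pools and 16 Rx queues, pool_len = fls(4 - 1) = 2 and
 * rss_len = fls(16 - 1) - 2 = 2, i.e. two GLORT bits select the pool and
 * two bits select the RSS queue within the pool.
 */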
1044 /* Flow Director configurations, only queue number is valid. */
1045 dglortdec = fls(dev->data->nb_rx_queues - 1);
1046 dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1047 (hw->mac.dglort_map + GLORT_FD_Q_BASE);
1048 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1049 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1051 /* Invalidate all other GLORT entries */
1052 for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1053 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1054 FM10K_DGLORTMAP_NONE);
1057 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
1059 fm10k_dev_start(struct rte_eth_dev *dev)
1061 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1064 PMD_INIT_FUNC_TRACE();
1066 /* stop, init, then start the hw */
1067 diag = fm10k_stop_hw(hw);
1068 if (diag != FM10K_SUCCESS) {
1069 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1073 diag = fm10k_init_hw(hw);
1074 if (diag != FM10K_SUCCESS) {
1075 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1079 diag = fm10k_start_hw(hw);
1080 if (diag != FM10K_SUCCESS) {
1081 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1085 diag = fm10k_dev_tx_init(dev);
1087 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1091 if (fm10k_dev_rxq_interrupt_setup(dev))
1094 diag = fm10k_dev_rx_init(dev);
1096 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1100 if (hw->mac.type == fm10k_mac_pf)
1101 fm10k_dev_dglort_map_configure(dev);
1103 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1104 struct fm10k_rx_queue *rxq;
1105 rxq = dev->data->rx_queues[i];
1107 if (rxq->rx_deferred_start)
1109 diag = fm10k_dev_rx_queue_start(dev, i);
1112 for (j = 0; j < i; ++j)
1113 rx_queue_clean(dev->data->rx_queues[j]);
1118 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1119 struct fm10k_tx_queue *txq;
1120 txq = dev->data->tx_queues[i];
1122 if (txq->tx_deferred_start)
1124 diag = fm10k_dev_tx_queue_start(dev, i);
1127 for (j = 0; j < i; ++j)
1128 tx_queue_clean(dev->data->tx_queues[j]);
1129 for (j = 0; j < dev->data->nb_rx_queues; ++j)
1130 rx_queue_clean(dev->data->rx_queues[j]);
1135 /* Update default vlan when not in VMDQ mode */
1136 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1137 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1139 fm10k_link_update(dev, 0);
1145 fm10k_dev_stop(struct rte_eth_dev *dev)
1147 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1148 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1149 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
1152 PMD_INIT_FUNC_TRACE();
1154 if (dev->data->tx_queues)
1155 for (i = 0; i < dev->data->nb_tx_queues; i++)
1156 fm10k_dev_tx_queue_stop(dev, i);
1158 if (dev->data->rx_queues)
1159 for (i = 0; i < dev->data->nb_rx_queues; i++)
1160 fm10k_dev_rx_queue_stop(dev, i);
1162 /* Disable datapath event */
1163 if (rte_intr_dp_is_en(intr_handle)) {
1164 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1165 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1166 3 << FM10K_RXINT_TIMER_SHIFT);
1167 if (hw->mac.type == fm10k_mac_pf)
1168 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
1169 FM10K_ITR_MASK_SET);
1171 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
1172 FM10K_ITR_MASK_SET);
1175 /* Clean datapath event and queue/vec mapping */
1176 rte_intr_efd_disable(intr_handle);
1177 rte_free(intr_handle->intr_vec);
1178 intr_handle->intr_vec = NULL;
1182 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1186 PMD_INIT_FUNC_TRACE();
1188 if (dev->data->tx_queues) {
1189 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1190 struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1196 if (dev->data->rx_queues) {
1197 for (i = 0; i < dev->data->nb_rx_queues; i++)
1198 fm10k_rx_queue_release(dev->data->rx_queues[i]);
1203 fm10k_dev_close(struct rte_eth_dev *dev)
1205 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1207 PMD_INIT_FUNC_TRACE();
1210 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1211 MAX_LPORT_NUM, false);
1212 fm10k_mbx_unlock(hw);
1214 /* allow 100ms for device to quiesce */
1215 rte_delay_us(FM10K_SWITCH_QUIESCE_US);
1217 /* Stop mailbox service first */
1218 fm10k_close_mbx_service(hw);
1219 fm10k_dev_stop(dev);
1220 fm10k_dev_queue_release(dev);
1225 fm10k_link_update(struct rte_eth_dev *dev,
1226 __rte_unused int wait_to_complete)
1228 struct fm10k_dev_info *dev_info =
1229 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1230 PMD_INIT_FUNC_TRACE();
1232 dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
1233 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1234 dev->data->dev_link.link_status =
1235 dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
1236 dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
1241 static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1242 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
1247 if (xstats_names != NULL) {
1248 /* Note: limit checked in rte_eth_xstats_names() */
1251 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1252 snprintf(xstats_names[count].name,
1253 sizeof(xstats_names[count].name),
1254 "%s", fm10k_hw_stats_strings[count].name);
1258 /* PF queue stats */
1259 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1260 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1261 snprintf(xstats_names[count].name,
1262 sizeof(xstats_names[count].name),
1264 fm10k_hw_stats_rx_q_strings[i].name);
1267 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1268 snprintf(xstats_names[count].name,
1269 sizeof(xstats_names[count].name),
1271 fm10k_hw_stats_tx_q_strings[i].name);
1276 return FM10K_NB_XSTATS;
1280 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1283 struct fm10k_hw_stats *hw_stats =
1284 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1285 unsigned i, q, count = 0;
1287 if (n < FM10K_NB_XSTATS)
1288 return FM10K_NB_XSTATS;
1291 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1292 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1293 fm10k_hw_stats_strings[count].offset);
1294 xstats[count].id = count;
1298 /* PF queue stats */
1299 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1300 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1301 xstats[count].value =
1302 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1303 fm10k_hw_stats_rx_q_strings[i].offset);
1304 xstats[count].id = count;
1307 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1308 xstats[count].value =
1309 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1310 fm10k_hw_stats_tx_q_strings[i].offset);
1311 xstats[count].id = count;
1316 return FM10K_NB_XSTATS;
1320 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1322 uint64_t ipackets, opackets, ibytes, obytes, imissed;
1323 struct fm10k_hw *hw =
1324 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1325 struct fm10k_hw_stats *hw_stats =
1326 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1329 PMD_INIT_FUNC_TRACE();
1331 fm10k_update_hw_stats(hw, hw_stats);
1333 ipackets = opackets = ibytes = obytes = imissed = 0;
1334 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1335 (i < hw->mac.max_queues); ++i) {
1336 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1337 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1338 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
1339 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
1340 stats->q_errors[i] = hw_stats->q[i].rx_drops.count;
1341 ipackets += stats->q_ipackets[i];
1342 opackets += stats->q_opackets[i];
1343 ibytes += stats->q_ibytes[i];
1344 obytes += stats->q_obytes[i];
1345 imissed += stats->q_errors[i];
1347 stats->ipackets = ipackets;
1348 stats->opackets = opackets;
1349 stats->ibytes = ibytes;
1350 stats->obytes = obytes;
1351 stats->imissed = imissed;
1356 fm10k_stats_reset(struct rte_eth_dev *dev)
1358 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1359 struct fm10k_hw_stats *hw_stats =
1360 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1362 PMD_INIT_FUNC_TRACE();
1364 memset(hw_stats, 0, sizeof(*hw_stats));
1365 fm10k_rebind_hw_stats(hw, hw_stats);
1369 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1370 struct rte_eth_dev_info *dev_info)
1372 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1373 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1375 PMD_INIT_FUNC_TRACE();
1377 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
1378 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
1379 dev_info->max_rx_queues = hw->mac.max_queues;
1380 dev_info->max_tx_queues = hw->mac.max_queues;
1381 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
1382 dev_info->max_hash_mac_addrs = 0;
1383 dev_info->max_vfs = pdev->max_vfs;
1384 dev_info->vmdq_pool_base = 0;
1385 dev_info->vmdq_queue_base = 0;
1386 dev_info->max_vmdq_pools = ETH_32_POOLS;
1387 dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
1388 dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
1389 dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
1390 dev_info->rx_queue_offload_capa;
1391 dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
1392 dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
1393 dev_info->tx_queue_offload_capa;
1395 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1396 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1398 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1400 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1401 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1402 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1404 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1409 dev_info->default_txconf = (struct rte_eth_txconf) {
1411 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1412 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1413 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1415 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1416 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1420 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1421 .nb_max = FM10K_MAX_RX_DESC,
1422 .nb_min = FM10K_MIN_RX_DESC,
1423 .nb_align = FM10K_MULT_RX_DESC,
1426 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1427 .nb_max = FM10K_MAX_TX_DESC,
1428 .nb_min = FM10K_MIN_TX_DESC,
1429 .nb_align = FM10K_MULT_TX_DESC,
1430 .nb_seg_max = FM10K_TX_MAX_SEG,
1431 .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1434 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1435 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1436 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1439 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1440 static const uint32_t *
1441 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1443 if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1444 dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1445 static uint32_t ptypes[] = {
1446 /* refers to rx_desc_to_ol_flags() */
1449 RTE_PTYPE_L3_IPV4_EXT,
1451 RTE_PTYPE_L3_IPV6_EXT,
1458 } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1459 dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1460 static uint32_t ptypes_vec[] = {
1461 /* refers to fm10k_desc_to_pktype_v() */
1463 RTE_PTYPE_L3_IPV4_EXT,
1465 RTE_PTYPE_L3_IPV6_EXT,
1468 RTE_PTYPE_TUNNEL_GENEVE,
1469 RTE_PTYPE_TUNNEL_NVGRE,
1470 RTE_PTYPE_TUNNEL_VXLAN,
1471 RTE_PTYPE_TUNNEL_GRE,
1481 static const uint32_t *
1482 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1489 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1492 uint16_t mac_num = 0;
1493 uint32_t vid_idx, vid_bit, mac_index;
1494 struct fm10k_hw *hw;
1495 struct fm10k_macvlan_filter_info *macvlan;
1496 struct rte_eth_dev_data *data = dev->data;
1498 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1499 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1501 if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1502 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1506 if (vlan_id > ETH_VLAN_ID_MAX) {
1507 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1511 vid_idx = FM10K_VFTA_IDX(vlan_id);
1512 vid_bit = FM10K_VFTA_BIT(vlan_id);
1513 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1514 if (on && (macvlan->vfta[vid_idx] & vid_bit))
1516 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1517 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1518 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1519 "in the VLAN filter table");
1524 result = fm10k_update_vlan(hw, vlan_id, 0, on);
1525 fm10k_mbx_unlock(hw);
1526 if (result != FM10K_SUCCESS) {
1527 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1531 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1532 (result == FM10K_SUCCESS); mac_index++) {
1533 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1535 if (mac_num > macvlan->mac_num - 1) {
1536 PMD_INIT_LOG(ERR, "MAC address number "
1541 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1542 data->mac_addrs[mac_index].addr_bytes,
1544 fm10k_mbx_unlock(hw);
1547 if (result != FM10K_SUCCESS) {
1548 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1553 macvlan->vlan_num++;
1554 macvlan->vfta[vid_idx] |= vid_bit;
1556 macvlan->vlan_num--;
1557 macvlan->vfta[vid_idx] &= ~vid_bit;
1563 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1565 if (mask & ETH_VLAN_STRIP_MASK) {
1566 if (!(dev->data->dev_conf.rxmode.offloads &
1567 DEV_RX_OFFLOAD_VLAN_STRIP))
1568 PMD_INIT_LOG(ERR, "VLAN stripping is "
1569 "always on in fm10k");
1572 if (mask & ETH_VLAN_EXTEND_MASK) {
1573 if (dev->data->dev_conf.rxmode.offloads &
1574 DEV_RX_OFFLOAD_VLAN_EXTEND)
1575 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1576 "supported in fm10k");
1579 if (mask & ETH_VLAN_FILTER_MASK) {
1580 if (!(dev->data->dev_conf.rxmode.offloads &
1581 DEV_RX_OFFLOAD_VLAN_FILTER))
1582 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1588 /* Add/Remove a MAC address, and update filters to main VSI */
1589 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1590 const u8 *mac, bool add, uint32_t pool)
1592 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1593 struct fm10k_macvlan_filter_info *macvlan;
1596 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1598 if (pool != MAIN_VSI_POOL_NUMBER) {
1599 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1600 "mac to pool %u", pool);
1603 for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1604 if (!macvlan->vfta[j])
1606 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1607 if (!(macvlan->vfta[j] & (1 << k)))
1609 if (i + 1 > macvlan->vlan_num) {
1610 PMD_INIT_LOG(ERR, "VLAN number does not match");
1614 fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1615 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1616 fm10k_mbx_unlock(hw);
1622 /* Add/Remove a MAC address, and update filters to VMDQ */
1623 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1624 const u8 *mac, bool add, uint32_t pool)
1626 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1627 struct fm10k_macvlan_filter_info *macvlan;
1628 struct rte_eth_vmdq_rx_conf *vmdq_conf;
1631 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1632 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1634 if (pool > macvlan->nb_queue_pools) {
1635 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1637 pool, macvlan->nb_queue_pools);
1640 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1641 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1644 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1645 vmdq_conf->pool_map[i].vlan_id, add, 0);
1646 fm10k_mbx_unlock(hw);
1650 /* Add/Remove a MAC address, and update filters */
1651 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1652 const u8 *mac, bool add, uint32_t pool)
1654 struct fm10k_macvlan_filter_info *macvlan;
1656 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1658 if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1659 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1661 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1669 /* Add a MAC address, and update filters */
1671 fm10k_macaddr_add(struct rte_eth_dev *dev,
1672 struct ether_addr *mac_addr,
1676 struct fm10k_macvlan_filter_info *macvlan;
1678 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1679 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1680 macvlan->mac_vmdq_id[index] = pool;
1684 /* Remove a MAC address, and update filters */
1686 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1688 struct rte_eth_dev_data *data = dev->data;
1689 struct fm10k_macvlan_filter_info *macvlan;
1691 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1692 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1693 FALSE, macvlan->mac_vmdq_id[index]);
1694 macvlan->mac_vmdq_id[index] = 0;
1698 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1700 if ((request < min) || (request > max) || ((request % mult) != 0))
1708 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1710 if ((request < min) || (request > max) || ((div % request) != 0))
1717 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1719 uint16_t rx_free_thresh;
1721 if (conf->rx_free_thresh == 0)
1722 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1724 rx_free_thresh = conf->rx_free_thresh;
1726 /* make sure the requested threshold satisfies the constraints */
1727 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1728 FM10K_RX_FREE_THRESH_MAX(q),
1729 FM10K_RX_FREE_THRESH_DIV(q),
1731 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1732 "less than or equal to %u, "
1733 "greater than or equal to %u, "
1734 "and a divisor of %u",
1735 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1736 FM10K_RX_FREE_THRESH_MIN(q),
1737 FM10K_RX_FREE_THRESH_DIV(q));
1741 q->alloc_thresh = rx_free_thresh;
1742 q->drop_en = conf->rx_drop_en;
1743 q->rx_deferred_start = conf->rx_deferred_start;
1749 * Hardware requires specific alignment for Rx packet buffers. At
1750 * least one of the following two conditions must be satisfied.
1751 * 1. Address is 512B aligned
1752 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1754 * As such, the driver may need to adjust the DMA address within the
1755 * buffer by up to 512B.
1757 * return 1 if the element size is valid, otherwise return 0.
1760 mempool_element_size_valid(struct rte_mempool *mp)
1764 /* elt_size includes mbuf header and headroom */
1765 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1766 RTE_PKTMBUF_HEADROOM;
1768 /* account for up to 512B of alignment */
1769 min_size -= FM10K_RX_DATABUF_ALIGN;
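/* Example (illustrative; assumes a pool created with
 * RTE_MBUF_DEFAULT_BUF_SIZE, i.e. 2176B of data room including the default
 * 128B headroom): min_size works out to 2176 - 128 - 512 = 1536B guaranteed
 * after the worst-case alignment adjustment.
 */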
1771 /* sanity check for overflow */
1772 if (min_size > mp->elt_size)
1779 static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1783 return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
1786 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1790 return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
1791 DEV_RX_OFFLOAD_VLAN_FILTER |
1792 DEV_RX_OFFLOAD_IPV4_CKSUM |
1793 DEV_RX_OFFLOAD_UDP_CKSUM |
1794 DEV_RX_OFFLOAD_TCP_CKSUM |
1795 DEV_RX_OFFLOAD_JUMBO_FRAME |
1796 DEV_RX_OFFLOAD_HEADER_SPLIT);
1800 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1801 uint16_t nb_desc, unsigned int socket_id,
1802 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1804 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1805 struct fm10k_dev_info *dev_info =
1806 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1807 struct fm10k_rx_queue *q;
1808 const struct rte_memzone *mz;
1811 PMD_INIT_FUNC_TRACE();
1813 offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1815 /* make sure the mempool element size can account for alignment. */
1816 if (!mempool_element_size_valid(mp)) {
1817 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1821 /* make sure a valid number of descriptors have been requested */
1822 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1823 FM10K_MULT_RX_DESC, nb_desc)) {
1824 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1825 "less than or equal to %"PRIu32", "
1826 "greater than or equal to %u, "
1827 "and a multiple of %u",
1828 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1829 FM10K_MULT_RX_DESC);
1834 * if this queue existed already, free the associated memory. The
1835 * queue cannot be reused in case we need to allocate memory on
1836 * different socket than was previously used.
1838 if (dev->data->rx_queues[queue_id] != NULL) {
1839 rx_queue_free(dev->data->rx_queues[queue_id]);
1840 dev->data->rx_queues[queue_id] = NULL;
1843 /* allocate memory for the queue structure */
1844 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1847 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1853 q->nb_desc = nb_desc;
1854 q->nb_fake_desc = FM10K_MULT_RX_DESC;
1855 q->port_id = dev->data->port_id;
1856 q->queue_id = queue_id;
1857 q->tail_ptr = (volatile uint32_t *)
1858 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1859 q->offloads = offloads;
1860 if (handle_rxconf(q, conf))
1863 /* allocate memory for the software ring */
1864 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1865 (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1866 RTE_CACHE_LINE_SIZE, socket_id);
1867 if (q->sw_ring == NULL) {
1868 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1874 * allocate memory for the hardware descriptor ring. A memzone large
1875 * enough to hold the maximum ring size is requested to allow for
1876 * resizing in later calls to the queue setup function.
1878 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1879 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1882 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1883 rte_free(q->sw_ring);
1887 q->hw_ring = mz->addr;
1888 q->hw_ring_phys_addr = mz->iova;
1890 /* Check if the number of descriptors satisfies the vector Rx requirement */
1891 if (!rte_is_power_of_2(nb_desc)) {
1892 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1893 "preconditions - canceling the feature for "
1894 "the whole port[%d]",
1895 q->queue_id, q->port_id);
1896 dev_info->rx_vec_allowed = false;
1898 fm10k_rxq_vec_setup(q);
1900 dev->data->rx_queues[queue_id] = q;
1905 fm10k_rx_queue_release(void *queue)
1907 PMD_INIT_FUNC_TRACE();
1909 rx_queue_free(queue);
1913 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1915 uint16_t tx_free_thresh;
1916 uint16_t tx_rs_thresh;
1918 /* The constraint macros require that tx_free_thresh is configured
1919 * before tx_rs_thresh */
1920 if (conf->tx_free_thresh == 0)
1921 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1923 tx_free_thresh = conf->tx_free_thresh;
1925 /* make sure the requested threshold satisfies the constraints */
1926 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1927 FM10K_TX_FREE_THRESH_MAX(q),
1928 FM10K_TX_FREE_THRESH_DIV(q),
1930 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1931 "less than or equal to %u, "
1932 "greater than or equal to %u, "
1933 "and a divisor of %u",
1934 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1935 FM10K_TX_FREE_THRESH_MIN(q),
1936 FM10K_TX_FREE_THRESH_DIV(q));
1940 q->free_thresh = tx_free_thresh;
1942 if (conf->tx_rs_thresh == 0)
1943 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1945 tx_rs_thresh = conf->tx_rs_thresh;
1947 q->tx_deferred_start = conf->tx_deferred_start;
1949 /* make sure the requested threshold satisfies the constraints */
1950 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1951 FM10K_TX_RS_THRESH_MAX(q),
1952 FM10K_TX_RS_THRESH_DIV(q),
1954 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1955 "less than or equal to %u, "
1956 "greater than or equal to %u, "
1957 "and a divisor of %u",
1958 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1959 FM10K_TX_RS_THRESH_MIN(q),
1960 FM10K_TX_RS_THRESH_DIV(q));
1964 q->rs_thresh = tx_rs_thresh;
1969 static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1976 static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1980 return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
1981 DEV_TX_OFFLOAD_MULTI_SEGS |
1982 DEV_TX_OFFLOAD_IPV4_CKSUM |
1983 DEV_TX_OFFLOAD_UDP_CKSUM |
1984 DEV_TX_OFFLOAD_TCP_CKSUM |
1985 DEV_TX_OFFLOAD_TCP_TSO);
1989 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1990 uint16_t nb_desc, unsigned int socket_id,
1991 const struct rte_eth_txconf *conf)
1993 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1994 struct fm10k_tx_queue *q;
1995 const struct rte_memzone *mz;
1998 PMD_INIT_FUNC_TRACE();
2000 offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
2002 /* make sure a valid number of descriptors have been requested */
2003 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
2004 FM10K_MULT_TX_DESC, nb_desc)) {
2005 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
2006 "less than or equal to %"PRIu32", "
2007 "greater than or equal to %u, "
2008 "and a multiple of %u",
2009 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
2010 FM10K_MULT_TX_DESC);
2015 * if this queue existed already, free the associated memory. The
2016 * queue cannot be reused in case we need to allocate memory on
2017 * different socket than was previously used.
2019 if (dev->data->tx_queues[queue_id] != NULL) {
2020 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
2023 dev->data->tx_queues[queue_id] = NULL;
2026 /* allocate memory for the queue structure */
2027 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
2030 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
2035 q->nb_desc = nb_desc;
2036 q->port_id = dev->data->port_id;
2037 q->queue_id = queue_id;
2038 q->offloads = offloads;
2039 q->ops = &def_txq_ops;
2040 q->tail_ptr = (volatile uint32_t *)
2041 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2042 if (handle_txconf(q, conf))
2045 /* allocate memory for the software ring */
2046 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2047 nb_desc * sizeof(struct rte_mbuf *),
2048 RTE_CACHE_LINE_SIZE, socket_id);
2049 if (q->sw_ring == NULL) {
2050 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2056 * allocate memory for the hardware descriptor ring. A memzone large
2057 * enough to hold the maximum ring size is requested to allow for
2058 * resizing in later calls to the queue setup function.
2060 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2061 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2064 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2065 rte_free(q->sw_ring);
2069 q->hw_ring = mz->addr;
2070 q->hw_ring_phys_addr = mz->iova;
2073 * allocate memory for the RS bit tracker. Enough slots to hold the
2074 * descriptor index for each RS bit needing to be set are required.
2076 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2077 ((nb_desc + 1) / q->rs_thresh) *
2079 RTE_CACHE_LINE_SIZE, socket_id);
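/* Sizing example (illustrative): with 512 Tx descriptors and, e.g., an
 * rs_thresh of 32, (512 + 1) / 32 = 16 slots are reserved, one per RS bit
 * that can be outstanding at a time.
 */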
2080 if (q->rs_tracker.list == NULL) {
2081 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2082 rte_free(q->sw_ring);
2087 dev->data->tx_queues[queue_id] = q;
2092 fm10k_tx_queue_release(void *queue)
2094 struct fm10k_tx_queue *q = queue;
2095 PMD_INIT_FUNC_TRACE();
2101 fm10k_reta_update(struct rte_eth_dev *dev,
2102 struct rte_eth_rss_reta_entry64 *reta_conf,
2105 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2106 uint16_t i, j, idx, shift;
2110 PMD_INIT_FUNC_TRACE();
2112 if (reta_size > FM10K_MAX_RSS_INDICES) {
2113 PMD_INIT_LOG(ERR, "The size of the hash lookup table configured "
2114 "(%d) doesn't match the number the hardware can support "
2115 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2120 * Update Redirection Table RETA[n], n=0..31. The redirection table has
2121 * 128 entries in 32 registers
2123 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2124 idx = i / RTE_RETA_GROUP_SIZE;
2125 shift = i % RTE_RETA_GROUP_SIZE;
2126 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2127 BIT_MASK_PER_UINT32);
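/* Worked example of the indexing above (illustrative only): with
 * RTE_RETA_GROUP_SIZE = 64, table index i = 8 gives idx = 0, shift = 8 and
 * mask = (reta_conf[0].mask >> 8) & 0xF, i.e. one mask bit per byte of the
 * 32-bit register FM10K_RETA(0, 2) that holds entries 8..11.
 */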
2132 if (mask != BIT_MASK_PER_UINT32)
2133 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2135 for (j = 0; j < CHARS_PER_UINT32; j++) {
2136 if (mask & (0x1 << j)) {
2138 reta &= ~(UINT8_MAX << CHAR_BIT * j);
2139 reta |= reta_conf[idx].reta[shift + j] <<
2143 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2150 fm10k_reta_query(struct rte_eth_dev *dev,
2151 struct rte_eth_rss_reta_entry64 *reta_conf,
2154 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2155 uint16_t i, j, idx, shift;
2159 PMD_INIT_FUNC_TRACE();
2161 if (reta_size < FM10K_MAX_RSS_INDICES) {
2162 PMD_INIT_LOG(ERR, "The size of the hash lookup table configured "
2163 "(%d) doesn't match the number the hardware can support "
2164 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2169 * Read Redirection Table RETA[n], n=0..31. The redirection table has
2170 * 128 entries in 32 registers
2172 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2173 idx = i / RTE_RETA_GROUP_SIZE;
2174 shift = i % RTE_RETA_GROUP_SIZE;
2175 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2176 BIT_MASK_PER_UINT32);
2180 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2181 for (j = 0; j < CHARS_PER_UINT32; j++) {
2182 if (mask & (0x1 << j))
2183 reta_conf[idx].reta[shift + j] = ((reta >>
2184 CHAR_BIT * j) & UINT8_MAX);
2192 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2193 struct rte_eth_rss_conf *rss_conf)
2195 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2196 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2198 uint64_t hf = rss_conf->rss_hf;
2201 PMD_INIT_FUNC_TRACE();
2203 if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2204 FM10K_RSSRK_ENTRIES_PER_REG))
2211 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
2212 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
2213 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
2214 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
2215 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
2216 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
2217 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
2218 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
2219 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
2221 /* If the requested hash types map to nothing supported, return an error */
2226 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2227 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2229 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2235 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2236 struct rte_eth_rss_conf *rss_conf)
2238 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2239 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2244 PMD_INIT_FUNC_TRACE();
2246 if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2247 FM10K_RSSRK_ENTRIES_PER_REG))
2251 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2252 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2254 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2256 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
2257 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
2258 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
2259 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
2260 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
2261 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
2262 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
2263 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
2264 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
2266 rss_conf->rss_hf = hf;
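/*
 * Illustrative sketch, not part of the driver: programming the RSSRK key and
 * MRQC hash types handled above through the generic ethdev API. The 40-byte
 * key length is an assumption intended to satisfy the
 * FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG check performed above; the
 * key bytes themselves are placeholders.
 */
static __rte_unused int
example_rss_hash_update(uint16_t port_id)
{
	static uint8_t rss_key[40] = { 0x6d, 0x5a };	/* rest left as zero */
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = rss_key,
		.rss_key_len = sizeof(rss_key),
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
			  ETH_RSS_NONFRAG_IPV4_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}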
2272 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2274 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2275 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2277 /* Bind all local non-queue interrupt to vector 0 */
2278 int_map |= FM10K_MISC_VEC_ID;
2280 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2281 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2282 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2283 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2284 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2285 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2287 /* Enable misc causes */
2288 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2289 FM10K_EIMR_ENABLE(THI_FAULT) |
2290 FM10K_EIMR_ENABLE(FUM_FAULT) |
2291 FM10K_EIMR_ENABLE(MAILBOX) |
2292 FM10K_EIMR_ENABLE(SWITCHREADY) |
2293 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2294 FM10K_EIMR_ENABLE(SRAMERROR) |
2295 FM10K_EIMR_ENABLE(VFLR));
2298 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2299 FM10K_ITR_MASK_CLEAR);
2300 FM10K_WRITE_FLUSH(hw);
2304 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2306 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2307 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2309 int_map |= FM10K_MISC_VEC_ID;
2311 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2312 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2313 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2314 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2315 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2316 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2318 /* Disable misc causes */
2319 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2320 FM10K_EIMR_DISABLE(THI_FAULT) |
2321 FM10K_EIMR_DISABLE(FUM_FAULT) |
2322 FM10K_EIMR_DISABLE(MAILBOX) |
2323 FM10K_EIMR_DISABLE(SWITCHREADY) |
2324 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2325 FM10K_EIMR_DISABLE(SRAMERROR) |
2326 FM10K_EIMR_DISABLE(VFLR));
2329 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2330 FM10K_WRITE_FLUSH(hw);
2334 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2336 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2337 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2339 /* Bind all local non-queue interrupt to vector 0 */
2340 int_map |= FM10K_MISC_VEC_ID;
2342 /* Only INT 0 is available; the other 15 are reserved. */
2343 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2346 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2347 FM10K_ITR_MASK_CLEAR);
2348 FM10K_WRITE_FLUSH(hw);
2352 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2354 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2355 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2357 int_map |= FM10K_MISC_VEC_ID;
2359 /* Only INT 0 is available; the other 15 are reserved. */
2360 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2363 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2364 FM10K_WRITE_FLUSH(hw);
2368 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2370 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2371 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2374 if (hw->mac.type == fm10k_mac_pf)
2375 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2376 FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2378 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2379 FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2380 rte_intr_enable(&pdev->intr_handle);
2385 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2387 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2388 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2391 if (hw->mac.type == fm10k_mac_pf)
2392 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2393 FM10K_ITR_MASK_SET);
2395 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2396 FM10K_ITR_MASK_SET);
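/*
 * Illustrative sketch, not part of the driver: the two callbacks above are
 * reached through the generic ethdev API when an application arms the queue
 * interrupt before sleeping and disarms it when it resumes polling. The
 * event-fd/epoll plumbing is omitted here for brevity.
 */
static __rte_unused void
example_rx_intr_toggle(uint16_t port_id, uint16_t queue_id)
{
	/* Arm the per-queue interrupt before blocking on its event fd. */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	/* ... wait for the interrupt, then resume polling ... */

	/* Disarm it again while the core polls the queue. */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}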
2401 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2403 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2404 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2405 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2406 uint32_t intr_vector, vec;
2410 /* fm10k needs one separate interrupt for the mailbox,
2411 * so only drivers which support multiple interrupt vectors,
2412 * e.g. vfio-pci, can work in fm10k interrupt mode.
2414 if (!rte_intr_cap_multiple(intr_handle) ||
2415 dev->data->dev_conf.intr_conf.rxq == 0)
2418 intr_vector = dev->data->nb_rx_queues;
2420 /* disable interrupt first */
2421 rte_intr_disable(intr_handle);
2422 if (hw->mac.type == fm10k_mac_pf)
2423 fm10k_dev_disable_intr_pf(dev);
2425 fm10k_dev_disable_intr_vf(dev);
2427 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2428 PMD_INIT_LOG(ERR, "Failed to init event fd");
2432 if (rte_intr_dp_is_en(intr_handle) && !result) {
2433 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2434 dev->data->nb_rx_queues * sizeof(int), 0);
2435 if (intr_handle->intr_vec) {
2436 for (queue_id = 0, vec = FM10K_RX_VEC_START;
2437 queue_id < dev->data->nb_rx_queues;
2439 intr_handle->intr_vec[queue_id] = vec;
2440 if (vec < intr_handle->nb_efd - 1
2441 + FM10K_RX_VEC_START)
2445 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2446 " intr_vec", dev->data->nb_rx_queues);
2447 rte_intr_efd_disable(intr_handle);
2452 if (hw->mac.type == fm10k_mac_pf)
2453 fm10k_dev_enable_intr_pf(dev);
2455 fm10k_dev_enable_intr_vf(dev);
2456 rte_intr_enable(intr_handle);
2457 hw->mac.ops.update_int_moderator(hw);
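/*
 * Illustrative sketch, not part of the driver: the setup routine above only
 * spreads Rx queues over MSI-X vectors when the application requested Rx
 * interrupts at configure time (and the device is bound to a multi-vector
 * capable driver such as vfio-pci).
 */
static __rte_unused int
example_configure_with_rx_intr(uint16_t port_id, uint16_t nb_rxq,
			       uint16_t nb_txq)
{
	struct rte_eth_conf port_conf = {
		.intr_conf = {
			.rxq = 1,	/* request per-queue Rx interrupts */
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}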
2462 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2464 struct fm10k_fault fault;
2466 const char *estr = "Unknown error";
2468 /* Process PCA fault */
2469 if (eicr & FM10K_EICR_PCA_FAULT) {
2470 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2473 switch (fault.type) {
2475 estr = "PCA_NO_FAULT"; break;
2476 case PCA_UNMAPPED_ADDR:
2477 estr = "PCA_UNMAPPED_ADDR"; break;
2478 case PCA_BAD_QACCESS_PF:
2479 estr = "PCA_BAD_QACCESS_PF"; break;
2480 case PCA_BAD_QACCESS_VF:
2481 estr = "PCA_BAD_QACCESS_VF"; break;
2482 case PCA_MALICIOUS_REQ:
2483 estr = "PCA_MALICIOUS_REQ"; break;
2484 case PCA_POISONED_TLP:
2485 estr = "PCA_POISONED_TLP"; break;
2487 estr = "PCA_TLP_ABORT"; break;
2491 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2492 estr, fault.func ? "VF" : "PF", fault.func,
2493 fault.address, fault.specinfo);
2496 /* Process THI fault */
2497 if (eicr & FM10K_EICR_THI_FAULT) {
2498 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2501 switch (fault.type) {
2503 estr = "THI_NO_FAULT"; break;
2504 case THI_MAL_DIS_Q_FAULT:
2505 estr = "THI_MAL_DIS_Q_FAULT"; break;
2509 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2510 estr, fault.func ? "VF" : "PF", fault.func,
2511 fault.address, fault.specinfo);
2514 /* Process FUM fault */
2515 if (eicr & FM10K_EICR_FUM_FAULT) {
2516 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2519 switch (fault.type) {
2521 estr = "FUM_NO_FAULT"; break;
2522 case FUM_UNMAPPED_ADDR:
2523 estr = "FUM_UNMAPPED_ADDR"; break;
2524 case FUM_POISONED_TLP:
2525 estr = "FUM_POISONED_TLP"; break;
2526 case FUM_BAD_VF_QACCESS:
2527 estr = "FUM_BAD_VF_QACCESS"; break;
2528 case FUM_ADD_DECODE_ERR:
2529 estr = "FUM_ADD_DECODE_ERR"; break;
2531 estr = "FUM_RO_ERROR"; break;
2532 case FUM_QPRC_CRC_ERROR:
2533 estr = "FUM_QPRC_CRC_ERROR"; break;
2534 case FUM_CSR_TIMEOUT:
2535 estr = "FUM_CSR_TIMEOUT"; break;
2536 case FUM_INVALID_TYPE:
2537 estr = "FUM_INVALID_TYPE"; break;
2538 case FUM_INVALID_LENGTH:
2539 estr = "FUM_INVALID_LENGTH"; break;
2540 case FUM_INVALID_BE:
2541 estr = "FUM_INVALID_BE"; break;
2542 case FUM_INVALID_ALIGN:
2543 estr = "FUM_INVALID_ALIGN"; break;
2547 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2548 estr, fault.func ? "VF" : "PF", fault.func,
2549 fault.address, fault.specinfo);
2554 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2559 * PF interrupt handler triggered by the NIC to handle a specific interrupt.
2562 * Pointer to interrupt handle.
2564 * The address of the parameter (struct rte_eth_dev *) registered before.
2570 fm10k_dev_interrupt_handler_pf(void *param)
2572 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2573 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2574 uint32_t cause, status;
2575 struct fm10k_dev_info *dev_info =
2576 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2580 if (hw->mac.type != fm10k_mac_pf)
2583 cause = FM10K_READ_REG(hw, FM10K_EICR);
2585 /* Handle PCI fault cases */
2586 if (cause & FM10K_EICR_FAULT_MASK) {
2587 PMD_INIT_LOG(ERR, "INT: find fault!");
2588 fm10k_dev_handle_fault(hw, cause);
2591 /* Handle switch up/down */
2592 if (cause & FM10K_EICR_SWITCHNOTREADY)
2593 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2595 if (cause & FM10K_EICR_SWITCHREADY) {
2596 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2597 if (dev_info->sm_down == 1) {
2600 /* For recreating logical ports */
2601 status_mbx = hw->mac.ops.update_lport_state(hw,
2602 hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2603 if (status_mbx == FM10K_SUCCESS)
2605 "INT: Recreated Logical port");
2608 "INT: Logical ports weren't recreated");
2610 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2611 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2612 if (status_mbx != FM10K_SUCCESS)
2613 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2615 fm10k_mbx_unlock(hw);
2617 /* first clear the internal SW recording structure */
2618 if (!(dev->data->dev_conf.rxmode.mq_mode &
2619 ETH_MQ_RX_VMDQ_FLAG))
2620 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2623 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2624 MAIN_VSI_POOL_NUMBER);
2627 * Add the default MAC address and VLAN for the logical
2628 * ports that have been created; leave it to the
2629 * application to fully recover Rx filtering.
2631 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2632 MAIN_VSI_POOL_NUMBER);
2634 if (!(dev->data->dev_conf.rxmode.mq_mode &
2635 ETH_MQ_RX_VMDQ_FLAG))
2636 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2639 dev_info->sm_down = 0;
2640 _rte_eth_dev_callback_process(dev,
2641 RTE_ETH_EVENT_INTR_LSC,
2646 /* Handle mailbox message */
2648 err = hw->mbx.ops.process(hw, &hw->mbx);
2649 fm10k_mbx_unlock(hw);
2651 if (err == FM10K_ERR_RESET_REQUESTED) {
2652 PMD_INIT_LOG(INFO, "INT: Switch is down");
2653 dev_info->sm_down = 1;
2654 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2658 /* Handle SRAM error */
2659 if (cause & FM10K_EICR_SRAMERROR) {
2660 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2662 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2663 /* Write to clear pending bits */
2664 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2666 /* TODO: print out error message after shared code updates */
2669 /* Clear these 3 events if any are pending */
2670 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2671 FM10K_EICR_SWITCHREADY;
2673 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2675 /* Re-enable interrupt from device side */
2676 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2677 FM10K_ITR_MASK_CLEAR);
2678 /* Re-enable interrupt from host side */
2679 rte_intr_enable(dev->intr_handle);
2683 * VF interrupt handler triggered by the NIC to handle a specific interrupt.
2686 * Pointer to interrupt handle.
2688 * The address of the parameter (struct rte_eth_dev *) registered before.
2694 fm10k_dev_interrupt_handler_vf(void *param)
2696 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2697 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2698 struct fm10k_mbx_info *mbx = &hw->mbx;
2699 struct fm10k_dev_info *dev_info =
2700 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2701 const enum fm10k_mbx_state state = mbx->state;
2704 if (hw->mac.type != fm10k_mac_vf)
2707 /* Handle mailbox message if lock is acquired */
2709 hw->mbx.ops.process(hw, &hw->mbx);
2710 fm10k_mbx_unlock(hw);
2712 if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2713 PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2716 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2718 fm10k_mbx_unlock(hw);
2720 /* Setting reset flag */
2721 dev_info->sm_down = 1;
2722 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2726 if (dev_info->sm_down == 1 &&
2727 hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2728 PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2730 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2731 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2732 if (status_mbx != FM10K_SUCCESS)
2733 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2734 fm10k_mbx_unlock(hw);
2736 /* first clear the internal SW recording structure */
2737 fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2738 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2739 MAIN_VSI_POOL_NUMBER);
2742 * Add the default MAC address and VLAN for the logical ports that
2743 * have been created; leave it to the application to fully recover
2746 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2747 MAIN_VSI_POOL_NUMBER);
2748 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2750 dev_info->sm_down = 0;
2751 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2755 /* Re-enable interrupt from device side */
2756 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2757 FM10K_ITR_MASK_CLEAR);
2758 /* Re-enable interrupt from host side */
2759 rte_intr_enable(dev->intr_handle);
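/*
 * Illustrative sketch, not part of the driver: both interrupt handlers above
 * signal switch up/down transitions through RTE_ETH_EVENT_INTR_LSC. An
 * application can observe them by registering a callback such as this one via
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 * example_lsc_event_cb, NULL).
 */
static __rte_unused int
example_lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		     void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	rte_eth_link_get_nowait(port_id, &link);
	PMD_INIT_LOG(INFO, "port %d link is %s", port_id,
		     link.link_status ? "up" : "down");
	return 0;
}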
2762 /* Mailbox message handler in VF */
2763 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2764 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2765 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2766 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2767 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2771 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2775 /* Initialize mailbox lock */
2776 fm10k_mbx_initlock(hw);
2778 /* Replace default message handler with new ones */
2779 if (hw->mac.type == fm10k_mac_vf)
2780 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2783 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
2787 /* Connect to SM for PF device or PF for VF device */
2788 return hw->mbx.ops.connect(hw, &hw->mbx);
2792 fm10k_close_mbx_service(struct fm10k_hw *hw)
2794 /* Disconnect from SM for PF device or PF for VF device */
2795 hw->mbx.ops.disconnect(hw, &hw->mbx);
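/*
 * The fm10k_mbx_lock()/fm10k_mbx_unlock() helpers used around every mailbox
 * operation in this file are defined in the driver header. A rough sketch of
 * the locking pattern, assuming a spinlock stored in the adapter private data
 * (the mbx_lock field name is an assumption for illustration only):
 *
 *	while (!rte_spinlock_trylock(&adapter->mbx_lock))
 *		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
 *	... hw->mbx.ops.xxx(hw, &hw->mbx) ...
 *	rte_spinlock_unlock(&adapter->mbx_lock);
 *
 * The interrupt handlers take the same lock, so mailbox FIFO accesses never
 * race between the control path and the ISR.
 */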
2798 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2799 .dev_configure = fm10k_dev_configure,
2800 .dev_start = fm10k_dev_start,
2801 .dev_stop = fm10k_dev_stop,
2802 .dev_close = fm10k_dev_close,
2803 .promiscuous_enable = fm10k_dev_promiscuous_enable,
2804 .promiscuous_disable = fm10k_dev_promiscuous_disable,
2805 .allmulticast_enable = fm10k_dev_allmulticast_enable,
2806 .allmulticast_disable = fm10k_dev_allmulticast_disable,
2807 .stats_get = fm10k_stats_get,
2808 .xstats_get = fm10k_xstats_get,
2809 .xstats_get_names = fm10k_xstats_get_names,
2810 .stats_reset = fm10k_stats_reset,
2811 .xstats_reset = fm10k_stats_reset,
2812 .link_update = fm10k_link_update,
2813 .dev_infos_get = fm10k_dev_infos_get,
2814 .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2815 .vlan_filter_set = fm10k_vlan_filter_set,
2816 .vlan_offload_set = fm10k_vlan_offload_set,
2817 .mac_addr_add = fm10k_macaddr_add,
2818 .mac_addr_remove = fm10k_macaddr_remove,
2819 .rx_queue_start = fm10k_dev_rx_queue_start,
2820 .rx_queue_stop = fm10k_dev_rx_queue_stop,
2821 .tx_queue_start = fm10k_dev_tx_queue_start,
2822 .tx_queue_stop = fm10k_dev_tx_queue_stop,
2823 .rx_queue_setup = fm10k_rx_queue_setup,
2824 .rx_queue_release = fm10k_rx_queue_release,
2825 .tx_queue_setup = fm10k_tx_queue_setup,
2826 .tx_queue_release = fm10k_tx_queue_release,
2827 .rx_descriptor_done = fm10k_dev_rx_descriptor_done,
2828 .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
2829 .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
2830 .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
2831 .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
2832 .reta_update = fm10k_reta_update,
2833 .reta_query = fm10k_reta_query,
2834 .rss_hash_update = fm10k_rss_hash_update,
2835 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
2838 static int ftag_check_handler(__rte_unused const char *key,
2839 const char *value, __rte_unused void *opaque)
2841 if (strcmp(value, "1"))
2848 fm10k_check_ftag(struct rte_devargs *devargs)
2850 struct rte_kvargs *kvlist;
2851 const char *ftag_key = "enable_ftag";
2853 if (devargs == NULL)
2856 kvlist = rte_kvargs_parse(devargs->args, NULL);
2860 if (!rte_kvargs_count(kvlist, ftag_key)) {
2861 rte_kvargs_free(kvlist);
2864 /* FTAG is enabled when the key-value pair enable_ftag=1 is present */
2865 if (rte_kvargs_process(kvlist, ftag_key,
2866 ftag_check_handler, NULL) < 0) {
2867 rte_kvargs_free(kvlist);
2870 rte_kvargs_free(kvlist);
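/*
 * Illustrative note, not part of the driver: fm10k_check_ftag() above looks
 * for the per-device argument "enable_ftag=1", which an application passes
 * through the EAL device arguments, e.g. (PCI address is a placeholder):
 *
 *	-w 0000:84:00.0,enable_ftag=1
 *
 * Any other value, or the absence of the key, leaves FTAG support disabled.
 */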
2876 fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
2880 struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
2885 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
2886 ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
2897 static void __attribute__((cold))
2898 fm10k_set_tx_function(struct rte_eth_dev *dev)
2900 struct fm10k_tx_queue *txq;
2903 uint16_t tx_ftag_en = 0;
2905 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2906 /* primary process has set the ftag flag and offloads */
2907 txq = dev->data->tx_queues[0];
2908 if (fm10k_tx_vec_condition_check(txq)) {
2909 dev->tx_pkt_burst = fm10k_xmit_pkts;
2910 dev->tx_pkt_prepare = fm10k_prep_pkts;
2911 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2913 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2914 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2915 dev->tx_pkt_prepare = NULL;
2920 if (fm10k_check_ftag(dev->device->devargs))
2923 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2924 txq = dev->data->tx_queues[i];
2925 txq->tx_ftag_en = tx_ftag_en;
2926 /* Check whether the conditions for Vector Tx are satisfied */
2927 if (fm10k_tx_vec_condition_check(txq))
2932 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2933 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2934 txq = dev->data->tx_queues[i];
2935 fm10k_txq_vec_setup(txq);
2937 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2938 dev->tx_pkt_prepare = NULL;
2940 dev->tx_pkt_burst = fm10k_xmit_pkts;
2941 dev->tx_pkt_prepare = fm10k_prep_pkts;
2942 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2946 static void __attribute__((cold))
2947 fm10k_set_rx_function(struct rte_eth_dev *dev)
2949 struct fm10k_dev_info *dev_info =
2950 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2951 uint16_t i, rx_using_sse;
2952 uint16_t rx_ftag_en = 0;
2954 if (fm10k_check_ftag(dev->device->devargs))
2957 /* In order to allow Vector Rx there are a few configuration
2958 * conditions to be met.
2960 if (!fm10k_rx_vec_condition_check(dev) &&
2961 dev_info->rx_vec_allowed && !rx_ftag_en) {
2962 if (dev->data->scattered_rx)
2963 dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2965 dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2966 } else if (dev->data->scattered_rx)
2967 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2969 dev->rx_pkt_burst = fm10k_recv_pkts;
2972 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2973 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
2976 PMD_INIT_LOG(DEBUG, "Use vector Rx func");
2978 PMD_INIT_LOG(DEBUG, "Use regular Rx func");
2980 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2983 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2984 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2986 rxq->rx_using_sse = rx_using_sse;
2987 rxq->rx_ftag_en = rx_ftag_en;
2992 fm10k_params_init(struct rte_eth_dev *dev)
2994 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2995 struct fm10k_dev_info *info =
2996 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2998 /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2999 * there is no way to get link status without reading BAR4. Until this
3000 * works, assume we have maximum bandwidth.
3001 * @todo - fix bus info
3003 hw->bus_caps.speed = fm10k_bus_speed_8000;
3004 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
3005 hw->bus_caps.payload = fm10k_bus_payload_512;
3006 hw->bus.speed = fm10k_bus_speed_8000;
3007 hw->bus.width = fm10k_bus_width_pcie_x8;
3008 hw->bus.payload = fm10k_bus_payload_256;
3010 info->rx_vec_allowed = true;
3014 eth_fm10k_dev_init(struct rte_eth_dev *dev)
3016 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3017 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3018 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3020 struct fm10k_macvlan_filter_info *macvlan;
3022 PMD_INIT_FUNC_TRACE();
3024 dev->dev_ops = &fm10k_eth_dev_ops;
3025 dev->rx_pkt_burst = &fm10k_recv_pkts;
3026 dev->tx_pkt_burst = &fm10k_xmit_pkts;
3027 dev->tx_pkt_prepare = &fm10k_prep_pkts;
3030 * The primary process does the whole initialization; secondary
3031 * processes just select the same Rx and Tx functions as the primary.
3033 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3034 fm10k_set_rx_function(dev);
3035 fm10k_set_tx_function(dev);
3039 rte_eth_copy_pci_info(dev, pdev);
3041 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
3042 memset(macvlan, 0, sizeof(*macvlan));
3043 /* Vendor and Device ID need to be set before init of shared code */
3044 memset(hw, 0, sizeof(*hw));
3045 hw->device_id = pdev->id.device_id;
3046 hw->vendor_id = pdev->id.vendor_id;
3047 hw->subsystem_device_id = pdev->id.subsystem_device_id;
3048 hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
3049 hw->revision_id = 0;
3050 hw->hw_addr = (void *)pdev->mem_resource[0].addr;
3051 if (hw->hw_addr == NULL) {
3052 PMD_INIT_LOG(ERR, "Bad mem resource."
3053 " Try to blacklist unused devices.");
3057 /* Store fm10k_adapter pointer */
3058 hw->back = dev->data->dev_private;
3060 /* Initialize the shared code */
3061 diag = fm10k_init_shared_code(hw);
3062 if (diag != FM10K_SUCCESS) {
3063 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
3067 /* Initialize parameters */
3068 fm10k_params_init(dev);
3070 /* Initialize the hw */
3071 diag = fm10k_init_hw(hw);
3072 if (diag != FM10K_SUCCESS) {
3073 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
3077 /* Initialize MAC address(es) */
3078 dev->data->mac_addrs = rte_zmalloc("fm10k",
3079 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
3080 if (dev->data->mac_addrs == NULL) {
3081 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
3085 diag = fm10k_read_mac_addr(hw);
3087 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3088 &dev->data->mac_addrs[0]);
3090 if (diag != FM10K_SUCCESS ||
3091 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
3093 /* Generate a random addr */
3094 eth_random_addr(hw->mac.addr);
3095 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
3096 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3097 &dev->data->mac_addrs[0]);
3100 /* Reset the hw statistics */
3101 fm10k_stats_reset(dev);
3104 diag = fm10k_reset_hw(hw);
3105 if (diag != FM10K_SUCCESS) {
3106 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
3110 /* Setup mailbox service */
3111 diag = fm10k_setup_mbx_service(hw);
3112 if (diag != FM10K_SUCCESS) {
3113 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
3117 /* PF and VF have different interrupt handling mechanisms */
3118 if (hw->mac.type == fm10k_mac_pf) {
3119 /* register callback func to eal lib */
3120 rte_intr_callback_register(intr_handle,
3121 fm10k_dev_interrupt_handler_pf, (void *)dev);
3123 /* enable MISC interrupt */
3124 fm10k_dev_enable_intr_pf(dev);
3126 rte_intr_callback_register(intr_handle,
3127 fm10k_dev_interrupt_handler_vf, (void *)dev);
3129 fm10k_dev_enable_intr_vf(dev);
3132 /* Enable intr after callback registered */
3133 rte_intr_enable(intr_handle);
3135 hw->mac.ops.update_int_moderator(hw);
3137 /* Make sure Switch Manager is ready before going forward. */
3138 if (hw->mac.type == fm10k_mac_pf) {
3139 int switch_ready = 0;
3141 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3143 hw->mac.ops.get_host_state(hw, &switch_ready);
3144 fm10k_mbx_unlock(hw);
3147 /* Delay some time to acquire async LPORT_MAP info. */
3148 rte_delay_us(WAIT_SWITCH_MSG_US);
3151 if (switch_ready == 0) {
3152 PMD_INIT_LOG(ERR, "switch is not ready");
3158 * The calls below trigger mailbox operations, so take the lock to
3159 * avoid racing with the interrupt handler. Writing to the mailbox
3160 * FIFO raises an interrupt to the PF/SM, whose handler replies with
3161 * an interrupt back to our side, which in turn touches the mailbox
3162 * FIFO again.
3165 /* Enable port first */
3166 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
3169 /* Set unicast mode by default. The app can switch to another mode via other API calls. */
3172 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
3173 FM10K_XCAST_MODE_NONE);
3175 fm10k_mbx_unlock(hw);
3177 /* Make sure default VID is ready before going forward. */
3178 if (hw->mac.type == fm10k_mac_pf) {
3179 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3180 if (hw->mac.default_vid)
3182 /* Delay some time to acquire async port VLAN info. */
3183 rte_delay_us(WAIT_SWITCH_MSG_US);
3186 if (!hw->mac.default_vid) {
3187 PMD_INIT_LOG(ERR, "default VID is not ready");
3192 /* Add default mac address */
3193 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
3194 MAIN_VSI_POOL_NUMBER);
3200 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
3202 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3203 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3204 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3205 PMD_INIT_FUNC_TRACE();
3207 /* only uninitialize in the primary process */
3208 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3211 /* safe to close dev here */
3212 fm10k_dev_close(dev);
3214 dev->dev_ops = NULL;
3215 dev->rx_pkt_burst = NULL;
3216 dev->tx_pkt_burst = NULL;
3218 /* disable uio/vfio intr */
3219 rte_intr_disable(intr_handle);
3221 /* PF and VF have different interrupt handling mechanisms */
3222 if (hw->mac.type == fm10k_mac_pf) {
3223 /* disable interrupt */
3224 fm10k_dev_disable_intr_pf(dev);
3226 /* unregister callback func to eal lib */
3227 rte_intr_callback_unregister(intr_handle,
3228 fm10k_dev_interrupt_handler_pf, (void *)dev);
3230 /* disable interrupt */
3231 fm10k_dev_disable_intr_vf(dev);
3233 rte_intr_callback_unregister(intr_handle,
3234 fm10k_dev_interrupt_handler_vf, (void *)dev);
3237 /* free mac memory */
3238 if (dev->data->mac_addrs) {
3239 rte_free(dev->data->mac_addrs);
3240 dev->data->mac_addrs = NULL;
3243 memset(hw, 0, sizeof(*hw));
3248 static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3249 struct rte_pci_device *pci_dev)
3251 return rte_eth_dev_pci_generic_probe(pci_dev,
3252 sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
3255 static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
3257 return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
3261 * The set of PCI devices this driver supports. This driver will enable both PF
3262 * and SRIOV-VF devices.
3264 static const struct rte_pci_id pci_id_fm10k_map[] = {
3265 { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
3266 { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
3267 { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
3268 { .vendor_id = 0, /* sentinel */ },
3271 static struct rte_pci_driver rte_pmd_fm10k = {
3272 .id_table = pci_id_fm10k_map,
3273 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3274 RTE_PCI_DRV_IOVA_AS_VA,
3275 .probe = eth_fm10k_pci_probe,
3276 .remove = eth_fm10k_pci_remove,
3279 RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
3280 RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
3281 RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
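/*
 * Illustrative note, not part of the driver: the kmod dependency above means
 * the device must be bound to one of the listed kernel modules before the PMD
 * can claim it, e.g. with the bundled bind tool (PCI address is a
 * placeholder):
 *
 *	usertools/dpdk-devbind.py --bind=vfio-pci 0000:84:00.0
 *
 * vfio-pci is also what the per-queue Rx interrupt mode set up in
 * fm10k_dev_rxq_interrupt_setup() expects.
 */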
3283 RTE_INIT(fm10k_init_log)
3285 fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
3286 if (fm10k_logtype_init >= 0)
3287 rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
3288 fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
3289 if (fm10k_logtype_driver >= 0)
3290 rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);
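/*
 * Illustrative sketch, not part of the driver: rte_log_register() returns the
 * already-registered id when the name exists, so an application can raise the
 * log level of these types at run time without access to the variables above.
 */
static __rte_unused void
example_enable_fm10k_debug_logs(void)
{
	int lt = rte_log_register("pmd.net.fm10k.driver");

	if (lt >= 0)
		rte_log_set_level(lt, RTE_LOG_DEBUG);
}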