4 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_memzone.h>
37 #include <rte_string_fns.h>
39 #include <rte_spinlock.h>
40 #include <rte_kvargs.h>
43 #include "base/fm10k_api.h"
45 /* Default delay to acquire mailbox lock */
46 #define FM10K_MBXLOCK_DELAY_US 20
47 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
49 #define MAIN_VSI_POOL_NUMBER 0
51 /* Maximum number of attempts to acquire switch status */
52 #define MAX_QUERY_SWITCH_STATE_TIMES 10
53 /* Wait interval to get switch status */
54 #define WAIT_SWITCH_MSG_US 100000
55 /* Number of chars per uint32 type */
56 #define CHARS_PER_UINT32 (sizeof(uint32_t))
57 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
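/* Worked example (illustrative only): with 4 bytes per uint32_t,
 * CHARS_PER_UINT32 is 4 and BIT_MASK_PER_UINT32 is (1 << 4) - 1 = 0xF,
 * i.e. one mask bit per byte lane of a 32-bit RETA register.
 */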
59 /* default 1:1 map from queue ID to interrupt vector ID */
60 #define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id])
62 /* First 64 logical ports for PF/VMDQ, second 64 for Flow Director */
63 #define MAX_LPORT_NUM 128
64 #define GLORT_FD_Q_BASE 0x40
65 #define GLORT_PF_MASK 0xFFC0
66 #define GLORT_FD_MASK GLORT_PF_MASK
67 #define GLORT_FD_INDEX GLORT_FD_Q_BASE
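/* Illustrative reading of the masks above (an interpretation, not taken from
 * this file): GLORT_PF_MASK 0xFFC0 leaves the low 6 bits free, so one DGLORT
 * map entry covers 64 consecutive logical ports (0x00-0x3F for PF/VMDQ), and
 * GLORT_FD_Q_BASE 0x40 addresses the next block of 64 ports (0x40-0x7F) for
 * Flow Director queues.
 */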
69 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
70 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
71 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
72 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
73 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
74 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
76 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
77 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
78 const u8 *mac, bool add, uint32_t pool);
79 static void fm10k_tx_queue_release(void *queue);
80 static void fm10k_rx_queue_release(void *queue);
81 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
82 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
83 static int fm10k_check_ftag(struct rte_devargs *devargs);
85 struct fm10k_xstats_name_off {
86 char name[RTE_ETH_XSTATS_NAME_SIZE];
90 struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
91 {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
92 {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
93 {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
94 {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
95 {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
96 {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
97 {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
98 {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
102 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
103 sizeof(fm10k_hw_stats_strings[0]))
105 struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
106 {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
107 {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
108 {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
111 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
112 sizeof(fm10k_hw_stats_rx_q_strings[0]))
114 struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
115 {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
116 {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
119 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
120 sizeof(fm10k_hw_stats_tx_q_strings[0]))
122 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
123 (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
125 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
128 fm10k_mbx_initlock(struct fm10k_hw *hw)
130 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
134 fm10k_mbx_lock(struct fm10k_hw *hw)
136 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
137 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
141 fm10k_mbx_unlock(struct fm10k_hw *hw)
143 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
146 /* Stubs needed for linkage when vPMD is disabled */
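/* Background sketch (an assumption for illustration, not taken from this
 * file): weak definitions only resolve when no strong symbol is linked in,
 * so when the vector Rx/Tx object is compiled, its strong definitions
 * override these stubs at link time, e.g.:
 *
 *   uint16_t __attribute__((weak)) f(void) { return 0; }  // stub, fallback
 *   uint16_t f(void) { return 1; }                         // strong, wins
 */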
147 int __attribute__((weak))
148 fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
153 uint16_t __attribute__((weak))
155 __rte_unused void *rx_queue,
156 __rte_unused struct rte_mbuf **rx_pkts,
157 __rte_unused uint16_t nb_pkts)
162 uint16_t __attribute__((weak))
163 fm10k_recv_scattered_pkts_vec(
164 __rte_unused void *rx_queue,
165 __rte_unused struct rte_mbuf **rx_pkts,
166 __rte_unused uint16_t nb_pkts)
171 int __attribute__((weak))
172 fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
178 void __attribute__((weak))
179 fm10k_rx_queue_release_mbufs_vec(
180 __rte_unused struct fm10k_rx_queue *rxq)
185 void __attribute__((weak))
186 fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
191 int __attribute__((weak))
192 fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
197 uint16_t __attribute__((weak))
198 fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
199 __rte_unused struct rte_mbuf **tx_pkts,
200 __rte_unused uint16_t nb_pkts)
206 * reset queue to initial state, allocate software buffers used when starting
208 * return 0 on success
209 * return -ENOMEM if buffers cannot be allocated
210 * return -EINVAL if buffers do not satisfy alignment condition
213 rx_queue_reset(struct fm10k_rx_queue *q)
215 static const union fm10k_rx_desc zero = {{0} };
218 PMD_INIT_FUNC_TRACE();
220 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
224 for (i = 0; i < q->nb_desc; ++i) {
225 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
226 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
227 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
231 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
232 q->hw_ring[i].q.pkt_addr = dma_addr;
233 q->hw_ring[i].q.hdr_addr = dma_addr;
236 /* initialize extra software ring entries. Space for these extra
237 * entries is always allocated.
239 memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
240 for (i = 0; i < q->nb_fake_desc; ++i) {
241 q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
242 q->hw_ring[q->nb_desc + i] = zero;
247 q->next_trigger = q->alloc_thresh - 1;
248 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
249 q->rxrearm_start = 0;
256 * clean queue, descriptor rings, free software buffers used when stopping
260 rx_queue_clean(struct fm10k_rx_queue *q)
262 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
264 PMD_INIT_FUNC_TRACE();
266 /* zero descriptor rings */
267 for (i = 0; i < q->nb_desc; ++i)
268 q->hw_ring[i] = zero;
270 /* zero faked descriptors */
271 for (i = 0; i < q->nb_fake_desc; ++i)
272 q->hw_ring[q->nb_desc + i] = zero;
274 /* vPMD driver has a different way of releasing mbufs. */
275 if (q->rx_using_sse) {
276 fm10k_rx_queue_release_mbufs_vec(q);
280 /* free software buffers */
281 for (i = 0; i < q->nb_desc; ++i) {
283 rte_pktmbuf_free_seg(q->sw_ring[i]);
284 q->sw_ring[i] = NULL;
290 * free all queue memory used when releasing the queue (i.e. at configure time)
293 rx_queue_free(struct fm10k_rx_queue *q)
295 PMD_INIT_FUNC_TRACE();
297 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
300 rte_free(q->sw_ring);
309 * disable RX queue, wait until HW finishes the necessary flush operation
312 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
316 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
317 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
318 reg & ~FM10K_RXQCTL_ENABLE);
320 /* Wait 100us at most */
321 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
323 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
324 if (!(reg & FM10K_RXQCTL_ENABLE))
328 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
335 * reset queue to initial state, allocate software buffers used when starting
339 tx_queue_reset(struct fm10k_tx_queue *q)
341 PMD_INIT_FUNC_TRACE();
345 q->nb_free = q->nb_desc - 1;
346 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
347 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
351 * clean queue, descriptor rings, free software buffers used when stopping
355 tx_queue_clean(struct fm10k_tx_queue *q)
357 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
359 PMD_INIT_FUNC_TRACE();
361 /* zero descriptor rings */
362 for (i = 0; i < q->nb_desc; ++i)
363 q->hw_ring[i] = zero;
365 /* free software buffers */
366 for (i = 0; i < q->nb_desc; ++i) {
368 rte_pktmbuf_free_seg(q->sw_ring[i]);
369 q->sw_ring[i] = NULL;
375 * free all queue memory used when releasing the queue (i.e. at configure time)
378 tx_queue_free(struct fm10k_tx_queue *q)
380 PMD_INIT_FUNC_TRACE();
382 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
384 if (q->rs_tracker.list) {
385 rte_free(q->rs_tracker.list);
386 q->rs_tracker.list = NULL;
389 rte_free(q->sw_ring);
398 * disable TX queue, wait until HW finishes the necessary flush operation
401 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
405 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
406 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
407 reg & ~FM10K_TXDCTL_ENABLE);
409 /* Wait 100us at most */
410 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
412 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
413 if (!(reg & FM10K_TXDCTL_ENABLE))
417 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
424 fm10k_check_mq_mode(struct rte_eth_dev *dev)
426 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
427 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
428 struct rte_eth_vmdq_rx_conf *vmdq_conf;
429 uint16_t nb_rx_q = dev->data->nb_rx_queues;
431 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
433 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
434 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
438 if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
441 if (hw->mac.type == fm10k_mac_vf) {
442 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
446 /* Check VMDQ queue pool number */
447 if (vmdq_conf->nb_queue_pools >
448 sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
449 vmdq_conf->nb_queue_pools > nb_rx_q) {
450 PMD_INIT_LOG(ERR, "Too many queue pools: %d",
451 vmdq_conf->nb_queue_pools);
458 static const struct fm10k_txq_ops def_txq_ops = {
459 .reset = tx_queue_reset,
463 fm10k_dev_configure(struct rte_eth_dev *dev)
467 PMD_INIT_FUNC_TRACE();
469 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
470 PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
471 /* multiple queue mode checking */
472 ret = fm10k_check_mq_mode(dev);
474 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
482 /* fls = find last set bit = 32 minus the number of leading zeros */
484 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
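/* Worked examples (illustrative): fls(0) = 0, fls(1) = 1, fls(2) = 2,
 * fls(5) = 3, fls(8) = 4 -- the 1-based index of the highest set bit, so
 * fls(n - 1) gives the number of bits needed to encode n entries.
 */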
488 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
490 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
491 struct rte_eth_vmdq_rx_conf *vmdq_conf;
494 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
496 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
497 if (!vmdq_conf->pool_map[i].pools)
500 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
501 fm10k_mbx_unlock(hw);
506 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
508 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
510 /* Add default MAC address */
511 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
512 MAIN_VSI_POOL_NUMBER);
516 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
518 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
519 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
520 uint32_t mrqc, *key, i, reta, j;
523 #define RSS_KEY_SIZE 40
524 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
525 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
526 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
527 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
528 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
529 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
532 if (dev->data->nb_rx_queues == 1 ||
533 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
534 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
537 /* RSS hash key is rss_intel_key (default) or user-provided (rss_key) */
538 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
539 key = (uint32_t *)rss_intel_key;
541 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
543 /* Now fill our hash function seeds, 4 bytes at a time */
544 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
545 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
548 * Fill in redirection table
549 * The byte-swap is needed because NIC registers are in
550 * little-endian order.
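/* Worked example (illustrative, assuming 3 RX queues): j cycles 0,1,2,0,...
 * and each group of four consecutive table entries is packed into one 32-bit
 * RETA register (the most recently added entry lands in the least
 * significant byte), covering all 128 entries in 32 register writes.
 */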
553 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
554 if (j == dev->data->nb_rx_queues)
556 reta = (reta << CHAR_BIT) | j;
558 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
563 * Generate RSS hash based on packet types, TCP/UDP
564 * port numbers and/or IPv4/v6 src and dst addresses
566 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
568 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
569 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
570 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
571 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
572 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
573 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
574 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
575 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
576 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
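/* Example (illustrative): an application requesting
 * ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP would yield
 * mrqc = FM10K_MRQC_IPV4 | FM10K_MRQC_TCP_IPV4 here.
 */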
579 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
584 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
588 fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
590 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
593 for (i = 0; i < nb_lport_new; i++) {
594 /* Set unicast mode by default. The application can change
595 * it to another mode via other API calls.
598 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
599 FM10K_XCAST_MODE_NONE);
600 fm10k_mbx_unlock(hw);
605 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
607 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
608 struct rte_eth_vmdq_rx_conf *vmdq_conf;
609 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
610 struct fm10k_macvlan_filter_info *macvlan;
611 uint16_t nb_queue_pools = 0; /* pool number in configuration */
612 uint16_t nb_lport_new;
614 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
615 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
617 fm10k_dev_rss_configure(dev);
619 /* only PF supports VMDQ */
620 if (hw->mac.type != fm10k_mac_pf)
623 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
624 nb_queue_pools = vmdq_conf->nb_queue_pools;
626 /* no pool number change, no need to update logic port and VLAN/MAC */
627 if (macvlan->nb_queue_pools == nb_queue_pools)
630 nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
631 fm10k_dev_logic_port_update(dev, nb_lport_new);
633 /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
634 memset(dev->data->mac_addrs, 0,
635 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
636 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
637 &dev->data->mac_addrs[0]);
638 memset(macvlan, 0, sizeof(*macvlan));
639 macvlan->nb_queue_pools = nb_queue_pools;
642 fm10k_dev_vmdq_rx_configure(dev);
644 fm10k_dev_pf_main_vsi_reset(dev);
648 fm10k_dev_tx_init(struct rte_eth_dev *dev)
650 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
652 struct fm10k_tx_queue *txq;
656 /* Disable TXINT to avoid possible interrupt */
657 for (i = 0; i < hw->mac.max_queues; i++)
658 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
659 3 << FM10K_TXINT_TIMER_SHIFT);
662 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
663 txq = dev->data->tx_queues[i];
664 base_addr = txq->hw_ring_phys_addr;
665 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
667 /* disable queue to avoid issues while updating state */
668 ret = tx_queue_disable(hw, i);
670 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
673 /* Enable use of FTAG bit in TX descriptor, PFVTCTL
674 * register is read-only for VF.
676 if (fm10k_check_ftag(dev->pci_dev->devargs)) {
677 if (hw->mac.type == fm10k_mac_pf) {
678 FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
679 FM10K_PFVTCTL_FTAG_DESC_ENABLE);
680 PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
682 PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
687 /* set location and size for descriptor ring */
688 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
689 base_addr & UINT64_LOWER_32BITS_MASK);
690 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
691 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
692 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
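/* Worked example (illustrative address): for a ring at physical address
 * 0x1_2345_6000, TDBAL receives 0x23456000 (the low 32 bits) and TDBAH
 * receives 0x1 (the address shifted right by 32 bits).
 */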
694 /* assign default SGLORT for each TX queue */
695 FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
698 /* set up vector or scalar TX function as appropriate */
699 fm10k_set_tx_function(dev);
705 fm10k_dev_rx_init(struct rte_eth_dev *dev)
707 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
708 struct fm10k_macvlan_filter_info *macvlan;
709 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
711 struct fm10k_rx_queue *rxq;
714 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
715 uint32_t logic_port = hw->mac.dglort_map;
717 uint16_t queue_stride = 0;
719 /* enable RXINT for interrupt mode */
721 if (rte_intr_dp_is_en(intr_handle)) {
722 for (; i < dev->data->nb_rx_queues; i++) {
723 FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(dev, i));
724 if (hw->mac.type == fm10k_mac_pf)
725 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
727 FM10K_ITR_MASK_CLEAR);
729 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
731 FM10K_ITR_MASK_CLEAR);
734 /* Disable other RXINT to avoid possible interrupt */
735 for (; i < hw->mac.max_queues; i++)
736 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
737 3 << FM10K_RXINT_TIMER_SHIFT);
739 /* Setup RX queues */
740 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
741 rxq = dev->data->rx_queues[i];
742 base_addr = rxq->hw_ring_phys_addr;
743 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
745 /* disable queue to avoid issues while updating state */
746 ret = rx_queue_disable(hw, i);
748 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
752 /* Setup the Base and Length of the Rx Descriptor Ring */
753 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
754 base_addr & UINT64_LOWER_32BITS_MASK);
755 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
756 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
757 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
759 /* Configure the Rx buffer size for one buffer without split */
760 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
761 RTE_PKTMBUF_HEADROOM);
762 /* As RX buffer is aligned to 512B within mbuf, some bytes are
763 * reserved for this purpose, and the worst case could be 511B.
764 * But SRR reg assumes all buffers have the same size. In order
765 * to fill the gap, we'll have to consider the worst case and
766 * assume 512B is reserved. If we don't do so, it's possible
767 * for HW to overwrite data in the next mbuf.
769 buf_size -= FM10K_RX_DATABUF_ALIGN;
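/* Worked example (illustrative numbers): assuming a 2048B mbuf data room and
 * a 128B RTE_PKTMBUF_HEADROOM, buf_size starts at 1920B and the 512B
 * worst-case alignment reserve leaves 1408B reported to SRRCTL.
 */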
771 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
772 (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
773 FM10K_SRRCTL_LOOPBACK_SUPPRESS);
775 /* Add dual VLAN tag length to support dual VLAN */
776 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
777 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
778 dev->data->dev_conf.rxmode.enable_scatter) {
780 dev->data->scattered_rx = 1;
781 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
782 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
783 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
786 /* Enable drop on empty, it's RO for VF */
787 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
788 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
790 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
791 FM10K_WRITE_FLUSH(hw);
794 /* Configure VMDQ/RSS if applicable */
795 fm10k_dev_mq_rx_configure(dev);
797 /* Decide the best RX function */
798 fm10k_set_rx_function(dev);
800 /* update RX_SGLORT for loopback suppression */
801 if (hw->mac.type != fm10k_mac_pf)
803 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
804 if (macvlan->nb_queue_pools)
805 queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
806 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
807 if (i && queue_stride && !(i % queue_stride))
809 FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
816 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
818 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
821 struct fm10k_rx_queue *rxq;
823 PMD_INIT_FUNC_TRACE();
825 if (rx_queue_id < dev->data->nb_rx_queues) {
826 rxq = dev->data->rx_queues[rx_queue_id];
827 err = rx_queue_reset(rxq);
828 if (err == -ENOMEM) {
829 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
831 } else if (err == -EINVAL) {
832 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
837 /* Setup the HW Rx Head and Tail Descriptor Pointers
838 * Note: this must be done AFTER the queue is enabled on real
839 * hardware, but BEFORE the queue is enabled when using the
840 * emulation platform. Do it in both places for now and remove
841 * this comment and the following two register writes when the
842 * emulation platform is no longer being used.
844 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
845 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
847 /* Set PF ownership flag for PF devices */
848 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
849 if (hw->mac.type == fm10k_mac_pf)
850 reg |= FM10K_RXQCTL_PF;
851 reg |= FM10K_RXQCTL_ENABLE;
852 /* enable RX queue */
853 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
854 FM10K_WRITE_FLUSH(hw);
856 /* Setup the HW Rx Head and Tail Descriptor Pointers
857 * Note: this must be done AFTER the queue is enabled
859 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
860 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
861 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
868 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
870 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
872 PMD_INIT_FUNC_TRACE();
874 if (rx_queue_id < dev->data->nb_rx_queues) {
875 /* Disable RX queue */
876 rx_queue_disable(hw, rx_queue_id);
878 /* Free mbuf and clean HW ring */
879 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
880 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
887 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
889 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
890 /** @todo - this should be defined in the shared code */
891 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
892 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
895 PMD_INIT_FUNC_TRACE();
897 if (tx_queue_id < dev->data->nb_tx_queues) {
898 struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
902 /* reset head and tail pointers */
903 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
904 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
906 /* enable TX queue */
907 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
908 FM10K_TXDCTL_ENABLE | txdctl);
909 FM10K_WRITE_FLUSH(hw);
910 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
918 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
920 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
922 PMD_INIT_FUNC_TRACE();
924 if (tx_queue_id < dev->data->nb_tx_queues) {
925 tx_queue_disable(hw, tx_queue_id);
926 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
927 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
933 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
935 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
936 != FM10K_DGLORTMAP_NONE);
940 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
942 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
945 PMD_INIT_FUNC_TRACE();
947 /* Return if it didn't acquire a valid glort range */
948 if (!fm10k_glort_valid(hw))
952 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
953 FM10K_XCAST_MODE_PROMISC);
954 fm10k_mbx_unlock(hw);
956 if (status != FM10K_SUCCESS)
957 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
961 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
963 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
967 PMD_INIT_FUNC_TRACE();
969 /* Return if it didn't acquire a valid glort range */
970 if (!fm10k_glort_valid(hw))
973 if (dev->data->all_multicast == 1)
974 mode = FM10K_XCAST_MODE_ALLMULTI;
976 mode = FM10K_XCAST_MODE_NONE;
979 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
981 fm10k_mbx_unlock(hw);
983 if (status != FM10K_SUCCESS)
984 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
988 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
990 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
993 PMD_INIT_FUNC_TRACE();
995 /* Return if it didn't acquire a valid glort range */
996 if (!fm10k_glort_valid(hw))
999 /* If promiscuous mode is enabled, it doesn't make sense to enable
1000 * allmulticast and disable promiscuous since fm10k can only select one mode.
1003 if (dev->data->promiscuous) {
1004 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
1005 "no need to enable allmulticast");
1010 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1011 FM10K_XCAST_MODE_ALLMULTI);
1012 fm10k_mbx_unlock(hw);
1014 if (status != FM10K_SUCCESS)
1015 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
1019 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
1021 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1024 PMD_INIT_FUNC_TRACE();
1026 /* Return if it didn't acquire a valid glort range */
1027 if (!fm10k_glort_valid(hw))
1030 if (dev->data->promiscuous) {
1031 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
1032 "since promisc mode is enabled");
1037 /* Change mode to unicast mode */
1038 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1039 FM10K_XCAST_MODE_NONE);
1040 fm10k_mbx_unlock(hw);
1042 if (status != FM10K_SUCCESS)
1043 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
1047 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
1049 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1050 uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
1051 uint16_t nb_queue_pools;
1052 struct fm10k_macvlan_filter_info *macvlan;
1054 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1055 nb_queue_pools = macvlan->nb_queue_pools;
1056 pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
1057 rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
1059 /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
1060 dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
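/* Worked example (illustrative): with 4 VMDQ pools and 16 RX queues,
 * pool_len = fls(3) = 2 and rss_len = fls(15) - 2 = 2, so two GLORT bits
 * select the pool and two bits select the RSS queue within the pool.
 */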
1061 dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1063 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1064 /* Configure VMDQ/RSS DGlort Decoder */
1065 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
1067 /* Flow Director configurations, only queue number is valid. */
1068 dglortdec = fls(dev->data->nb_rx_queues - 1);
1069 dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1070 (hw->mac.dglort_map + GLORT_FD_Q_BASE);
1071 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1072 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1074 /* Invalidate all other GLORT entries */
1075 for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1076 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1077 FM10K_DGLORTMAP_NONE);
1080 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
1082 fm10k_dev_start(struct rte_eth_dev *dev)
1084 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1087 PMD_INIT_FUNC_TRACE();
1089 /* stop, init, then start the hw */
1090 diag = fm10k_stop_hw(hw);
1091 if (diag != FM10K_SUCCESS) {
1092 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1096 diag = fm10k_init_hw(hw);
1097 if (diag != FM10K_SUCCESS) {
1098 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1102 diag = fm10k_start_hw(hw);
1103 if (diag != FM10K_SUCCESS) {
1104 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1108 diag = fm10k_dev_tx_init(dev);
1110 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1114 if (fm10k_dev_rxq_interrupt_setup(dev))
1117 diag = fm10k_dev_rx_init(dev);
1119 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1123 if (hw->mac.type == fm10k_mac_pf)
1124 fm10k_dev_dglort_map_configure(dev);
1126 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1127 struct fm10k_rx_queue *rxq;
1128 rxq = dev->data->rx_queues[i];
1130 if (rxq->rx_deferred_start)
1132 diag = fm10k_dev_rx_queue_start(dev, i);
1135 for (j = 0; j < i; ++j)
1136 rx_queue_clean(dev->data->rx_queues[j]);
1141 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1142 struct fm10k_tx_queue *txq;
1143 txq = dev->data->tx_queues[i];
1145 if (txq->tx_deferred_start)
1147 diag = fm10k_dev_tx_queue_start(dev, i);
1150 for (j = 0; j < i; ++j)
1151 tx_queue_clean(dev->data->tx_queues[j]);
1152 for (j = 0; j < dev->data->nb_rx_queues; ++j)
1153 rx_queue_clean(dev->data->rx_queues[j]);
1158 /* Update default vlan when not in VMDQ mode */
1159 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1160 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1166 fm10k_dev_stop(struct rte_eth_dev *dev)
1168 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1169 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1172 PMD_INIT_FUNC_TRACE();
1174 if (dev->data->tx_queues)
1175 for (i = 0; i < dev->data->nb_tx_queues; i++)
1176 fm10k_dev_tx_queue_stop(dev, i);
1178 if (dev->data->rx_queues)
1179 for (i = 0; i < dev->data->nb_rx_queues; i++)
1180 fm10k_dev_rx_queue_stop(dev, i);
1182 /* Disable datapath event */
1183 if (rte_intr_dp_is_en(intr_handle)) {
1184 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1185 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1186 3 << FM10K_RXINT_TIMER_SHIFT);
1187 if (hw->mac.type == fm10k_mac_pf)
1188 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
1189 FM10K_ITR_MASK_SET);
1191 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
1192 FM10K_ITR_MASK_SET);
1195 /* Clean datapath event and queue/vec mapping */
1196 rte_intr_efd_disable(intr_handle);
1197 rte_free(intr_handle->intr_vec);
1198 intr_handle->intr_vec = NULL;
1202 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1206 PMD_INIT_FUNC_TRACE();
1208 if (dev->data->tx_queues) {
1209 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1210 struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1216 if (dev->data->rx_queues) {
1217 for (i = 0; i < dev->data->nb_rx_queues; i++)
1218 fm10k_rx_queue_release(dev->data->rx_queues[i]);
1223 fm10k_dev_close(struct rte_eth_dev *dev)
1225 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1227 PMD_INIT_FUNC_TRACE();
1230 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1231 MAX_LPORT_NUM, false);
1232 fm10k_mbx_unlock(hw);
1234 /* Stop mailbox service first */
1235 fm10k_close_mbx_service(hw);
1236 fm10k_dev_stop(dev);
1237 fm10k_dev_queue_release(dev);
1242 fm10k_link_update(struct rte_eth_dev *dev,
1243 __rte_unused int wait_to_complete)
1245 PMD_INIT_FUNC_TRACE();
1247 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
1248 * x8 PCIe interface. For now, we leave the speed undefined since there
1249 * is no 50Gbps Ethernet. */
1250 dev->data->dev_link.link_speed = 0;
1251 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1252 dev->data->dev_link.link_status = 1;
1258 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
1261 struct fm10k_hw_stats *hw_stats =
1262 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1263 unsigned i, q, count = 0;
1265 if (n < FM10K_NB_XSTATS)
1266 return FM10K_NB_XSTATS;
1269 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1270 snprintf(xstats[count].name, sizeof(xstats[count].name),
1271 "%s", fm10k_hw_stats_strings[count].name);
1272 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1273 fm10k_hw_stats_strings[count].offset);
1277 /* PF queue stats */
1278 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1279 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1280 snprintf(xstats[count].name, sizeof(xstats[count].name),
1282 fm10k_hw_stats_rx_q_strings[i].name);
1283 xstats[count].value =
1284 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1285 fm10k_hw_stats_rx_q_strings[i].offset);
1288 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1289 snprintf(xstats[count].name, sizeof(xstats[count].name),
1291 fm10k_hw_stats_tx_q_strings[i].name);
1292 xstats[count].value =
1293 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1294 fm10k_hw_stats_tx_q_strings[i].offset);
1299 return FM10K_NB_XSTATS;
1303 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1305 uint64_t ipackets, opackets, ibytes, obytes;
1306 struct fm10k_hw *hw =
1307 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1308 struct fm10k_hw_stats *hw_stats =
1309 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1312 PMD_INIT_FUNC_TRACE();
1314 fm10k_update_hw_stats(hw, hw_stats);
1316 ipackets = opackets = ibytes = obytes = 0;
1317 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1318 (i < hw->mac.max_queues); ++i) {
1319 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1320 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1321 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
1322 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
1323 ipackets += stats->q_ipackets[i];
1324 opackets += stats->q_opackets[i];
1325 ibytes += stats->q_ibytes[i];
1326 obytes += stats->q_obytes[i];
1328 stats->ipackets = ipackets;
1329 stats->opackets = opackets;
1330 stats->ibytes = ibytes;
1331 stats->obytes = obytes;
1335 fm10k_stats_reset(struct rte_eth_dev *dev)
1337 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1338 struct fm10k_hw_stats *hw_stats =
1339 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1341 PMD_INIT_FUNC_TRACE();
1343 memset(hw_stats, 0, sizeof(*hw_stats));
1344 fm10k_rebind_hw_stats(hw, hw_stats);
1348 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1349 struct rte_eth_dev_info *dev_info)
1351 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1353 PMD_INIT_FUNC_TRACE();
1355 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
1356 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
1357 dev_info->max_rx_queues = hw->mac.max_queues;
1358 dev_info->max_tx_queues = hw->mac.max_queues;
1359 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
1360 dev_info->max_hash_mac_addrs = 0;
1361 dev_info->max_vfs = dev->pci_dev->max_vfs;
1362 dev_info->vmdq_pool_base = 0;
1363 dev_info->vmdq_queue_base = 0;
1364 dev_info->max_vmdq_pools = ETH_32_POOLS;
1365 dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
1366 dev_info->rx_offload_capa =
1367 DEV_RX_OFFLOAD_VLAN_STRIP |
1368 DEV_RX_OFFLOAD_IPV4_CKSUM |
1369 DEV_RX_OFFLOAD_UDP_CKSUM |
1370 DEV_RX_OFFLOAD_TCP_CKSUM;
1371 dev_info->tx_offload_capa =
1372 DEV_TX_OFFLOAD_VLAN_INSERT |
1373 DEV_TX_OFFLOAD_IPV4_CKSUM |
1374 DEV_TX_OFFLOAD_UDP_CKSUM |
1375 DEV_TX_OFFLOAD_TCP_CKSUM |
1376 DEV_TX_OFFLOAD_TCP_TSO;
1378 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1379 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1381 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1383 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1384 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1385 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1387 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1391 dev_info->default_txconf = (struct rte_eth_txconf) {
1393 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1394 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1395 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1397 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1398 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1399 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1402 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1403 .nb_max = FM10K_MAX_RX_DESC,
1404 .nb_min = FM10K_MIN_RX_DESC,
1405 .nb_align = FM10K_MULT_RX_DESC,
1408 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1409 .nb_max = FM10K_MAX_TX_DESC,
1410 .nb_min = FM10K_MIN_TX_DESC,
1411 .nb_align = FM10K_MULT_TX_DESC,
1415 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1416 static const uint32_t *
1417 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1419 if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1420 dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1421 static uint32_t ptypes[] = {
1422 /* refers to rx_desc_to_ol_flags() */
1425 RTE_PTYPE_L3_IPV4_EXT,
1427 RTE_PTYPE_L3_IPV6_EXT,
1434 } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1435 dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1436 static uint32_t ptypes_vec[] = {
1437 /* refers to fm10k_desc_to_pktype_v() */
1439 RTE_PTYPE_L3_IPV4_EXT,
1441 RTE_PTYPE_L3_IPV6_EXT,
1444 RTE_PTYPE_TUNNEL_GENEVE,
1445 RTE_PTYPE_TUNNEL_NVGRE,
1446 RTE_PTYPE_TUNNEL_VXLAN,
1447 RTE_PTYPE_TUNNEL_GRE,
1457 static const uint32_t *
1458 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1465 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1468 uint16_t mac_num = 0;
1469 uint32_t vid_idx, vid_bit, mac_index;
1470 struct fm10k_hw *hw;
1471 struct fm10k_macvlan_filter_info *macvlan;
1472 struct rte_eth_dev_data *data = dev->data;
1474 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1475 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1477 if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1478 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1482 if (vlan_id > ETH_VLAN_ID_MAX) {
1483 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1487 vid_idx = FM10K_VFTA_IDX(vlan_id);
1488 vid_bit = FM10K_VFTA_BIT(vlan_id);
1489 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1490 if (on && (macvlan->vfta[vid_idx] & vid_bit))
1492 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1493 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1494 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1495 "in the VLAN filter table");
1500 result = fm10k_update_vlan(hw, vlan_id, 0, on);
1501 fm10k_mbx_unlock(hw);
1502 if (result != FM10K_SUCCESS) {
1503 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1507 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1508 (result == FM10K_SUCCESS); mac_index++) {
1509 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1511 if (mac_num > macvlan->mac_num - 1) {
1512 PMD_INIT_LOG(ERR, "MAC address number "
1517 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1518 data->mac_addrs[mac_index].addr_bytes,
1520 fm10k_mbx_unlock(hw);
1523 if (result != FM10K_SUCCESS) {
1524 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1529 macvlan->vlan_num++;
1530 macvlan->vfta[vid_idx] |= vid_bit;
1532 macvlan->vlan_num--;
1533 macvlan->vfta[vid_idx] &= ~vid_bit;
1539 fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
1541 if (mask & ETH_VLAN_STRIP_MASK) {
1542 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1543 PMD_INIT_LOG(ERR, "VLAN stripping is "
1544 "always on in fm10k");
1547 if (mask & ETH_VLAN_EXTEND_MASK) {
1548 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1549 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1550 "supported in fm10k");
1553 if (mask & ETH_VLAN_FILTER_MASK) {
1554 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1555 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1559 /* Add/Remove a MAC address, and update filters to main VSI */
1560 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1561 const u8 *mac, bool add, uint32_t pool)
1563 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1564 struct fm10k_macvlan_filter_info *macvlan;
1567 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1569 if (pool != MAIN_VSI_POOL_NUMBER) {
1570 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1571 "mac to pool %u", pool);
1574 for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1575 if (!macvlan->vfta[j])
1577 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1578 if (!(macvlan->vfta[j] & (1 << k)))
1580 if (i + 1 > macvlan->vlan_num) {
1581 PMD_INIT_LOG(ERR, "vlan number not match");
1585 fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1586 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1587 fm10k_mbx_unlock(hw);
1593 /* Add/Remove a MAC address, and update filters to VMDQ */
1594 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1595 const u8 *mac, bool add, uint32_t pool)
1597 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1598 struct fm10k_macvlan_filter_info *macvlan;
1599 struct rte_eth_vmdq_rx_conf *vmdq_conf;
1602 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1603 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1605 if (pool > macvlan->nb_queue_pools) {
1606 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1608 pool, macvlan->nb_queue_pools);
1611 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1612 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1615 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1616 vmdq_conf->pool_map[i].vlan_id, add, 0);
1617 fm10k_mbx_unlock(hw);
1621 /* Add/Remove a MAC address, and update filters */
1622 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1623 const u8 *mac, bool add, uint32_t pool)
1625 struct fm10k_macvlan_filter_info *macvlan;
1627 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1629 if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1630 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1632 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1640 /* Add a MAC address, and update filters */
1642 fm10k_macaddr_add(struct rte_eth_dev *dev,
1643 struct ether_addr *mac_addr,
1647 struct fm10k_macvlan_filter_info *macvlan;
1649 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1650 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1651 macvlan->mac_vmdq_id[index] = pool;
1654 /* Remove a MAC address, and update filters */
1656 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1658 struct rte_eth_dev_data *data = dev->data;
1659 struct fm10k_macvlan_filter_info *macvlan;
1661 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1662 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1663 FALSE, macvlan->mac_vmdq_id[index]);
1664 macvlan->mac_vmdq_id[index] = 0;
1668 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1670 if ((request < min) || (request > max) || ((request % mult) != 0))
1678 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1680 if ((request < min) || (request > max) || ((div % request) != 0))
1687 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1689 uint16_t rx_free_thresh;
1691 if (conf->rx_free_thresh == 0)
1692 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1694 rx_free_thresh = conf->rx_free_thresh;
1696 /* make sure the requested threshold satisfies the constraints */
1697 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1698 FM10K_RX_FREE_THRESH_MAX(q),
1699 FM10K_RX_FREE_THRESH_DIV(q),
1701 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1702 "less than or equal to %u, "
1703 "greater than or equal to %u, "
1704 "and a divisor of %u",
1705 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1706 FM10K_RX_FREE_THRESH_MIN(q),
1707 FM10K_RX_FREE_THRESH_DIV(q));
1711 q->alloc_thresh = rx_free_thresh;
1712 q->drop_en = conf->rx_drop_en;
1713 q->rx_deferred_start = conf->rx_deferred_start;
1719 * Hardware requires specific alignment for Rx packet buffers. At
1720 * least one of the following two conditions must be satisfied.
1721 * 1. Address is 512B aligned
1722 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1724 * As such, the driver may need to adjust the DMA address within the
1725 * buffer by up to 512B.
1727 * return 1 if the element size is valid, otherwise return 0.
1730 mempool_element_size_valid(struct rte_mempool *mp)
1734 /* elt_size includes mbuf header and headroom */
1735 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1736 RTE_PKTMBUF_HEADROOM;
1738 /* account for up to 512B of alignment */
1739 min_size -= FM10K_RX_DATABUF_ALIGN;
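/* Worked example (illustrative numbers): for an element sized
 * sizeof(struct rte_mbuf) + 128B headroom + 2048B data room, min_size is
 * 2048 - 512 = 1536B, i.e. the usable buffer length in the worst-case
 * 512B alignment scenario that the validity check is based on.
 */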
1741 /* sanity check for overflow */
1742 if (min_size > mp->elt_size)
1750 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1751 uint16_t nb_desc, unsigned int socket_id,
1752 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1754 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1755 struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
1756 struct fm10k_rx_queue *q;
1757 const struct rte_memzone *mz;
1759 PMD_INIT_FUNC_TRACE();
1761 /* make sure the mempool element size can account for alignment. */
1762 if (!mempool_element_size_valid(mp)) {
1763 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1767 /* make sure a valid number of descriptors have been requested */
1768 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1769 FM10K_MULT_RX_DESC, nb_desc)) {
1770 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1771 "less than or equal to %"PRIu32", "
1772 "greater than or equal to %u, "
1773 "and a multiple of %u",
1774 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1775 FM10K_MULT_RX_DESC);
1780 * if this queue existed already, free the associated memory. The
1781 * queue cannot be reused in case we need to allocate memory on
1782 * a different socket than was previously used.
1784 if (dev->data->rx_queues[queue_id] != NULL) {
1785 rx_queue_free(dev->data->rx_queues[queue_id]);
1786 dev->data->rx_queues[queue_id] = NULL;
1789 /* allocate memory for the queue structure */
1790 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1793 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1799 q->nb_desc = nb_desc;
1800 q->nb_fake_desc = FM10K_MULT_RX_DESC;
1801 q->port_id = dev->data->port_id;
1802 q->queue_id = queue_id;
1803 q->tail_ptr = (volatile uint32_t *)
1804 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1805 if (handle_rxconf(q, conf))
1808 /* allocate memory for the software ring */
1809 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1810 (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1811 RTE_CACHE_LINE_SIZE, socket_id);
1812 if (q->sw_ring == NULL) {
1813 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1819 * allocate memory for the hardware descriptor ring. A memzone large
1820 * enough to hold the maximum ring size is requested to allow for
1821 * resizing in later calls to the queue setup function.
1823 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1824 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1827 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1828 rte_free(q->sw_ring);
1832 q->hw_ring = mz->addr;
1833 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1835 /* Check if the number of descriptors satisfies the vector Rx requirement */
1836 if (!rte_is_power_of_2(nb_desc)) {
1837 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1838 "preconditions - canceling the feature for "
1839 "the whole port[%d]",
1840 q->queue_id, q->port_id);
1841 dev_info->rx_vec_allowed = false;
1843 fm10k_rxq_vec_setup(q);
1845 dev->data->rx_queues[queue_id] = q;
1850 fm10k_rx_queue_release(void *queue)
1852 PMD_INIT_FUNC_TRACE();
1854 rx_queue_free(queue);
1858 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1860 uint16_t tx_free_thresh;
1861 uint16_t tx_rs_thresh;
1863 /* constraint MACROs require that tx_free_thresh is configured
1864 * before tx_rs_thresh */
1865 if (conf->tx_free_thresh == 0)
1866 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1868 tx_free_thresh = conf->tx_free_thresh;
1870 /* make sure the requested threshold satisfies the constraints */
1871 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1872 FM10K_TX_FREE_THRESH_MAX(q),
1873 FM10K_TX_FREE_THRESH_DIV(q),
1875 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1876 "less than or equal to %u, "
1877 "greater than or equal to %u, "
1878 "and a divisor of %u",
1879 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1880 FM10K_TX_FREE_THRESH_MIN(q),
1881 FM10K_TX_FREE_THRESH_DIV(q));
1885 q->free_thresh = tx_free_thresh;
1887 if (conf->tx_rs_thresh == 0)
1888 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1890 tx_rs_thresh = conf->tx_rs_thresh;
1892 q->tx_deferred_start = conf->tx_deferred_start;
1894 /* make sure the requested threshold satisfies the constraints */
1895 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1896 FM10K_TX_RS_THRESH_MAX(q),
1897 FM10K_TX_RS_THRESH_DIV(q),
1899 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1900 "less than or equal to %u, "
1901 "greater than or equal to %u, "
1902 "and a divisor of %u",
1903 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1904 FM10K_TX_RS_THRESH_MIN(q),
1905 FM10K_TX_RS_THRESH_DIV(q));
1909 q->rs_thresh = tx_rs_thresh;
1915 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1916 uint16_t nb_desc, unsigned int socket_id,
1917 const struct rte_eth_txconf *conf)
1919 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1920 struct fm10k_tx_queue *q;
1921 const struct rte_memzone *mz;
1923 PMD_INIT_FUNC_TRACE();
1925 /* make sure a valid number of descriptors have been requested */
1926 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1927 FM10K_MULT_TX_DESC, nb_desc)) {
1928 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1929 "less than or equal to %"PRIu32", "
1930 "greater than or equal to %u, "
1931 "and a multiple of %u",
1932 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1933 FM10K_MULT_TX_DESC);
1938 * if this queue existed already, free the associated memory. The
1939 * queue cannot be reused in case we need to allocate memory on
1940 * a different socket than was previously used.
1942 if (dev->data->tx_queues[queue_id] != NULL) {
1943 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1946 dev->data->tx_queues[queue_id] = NULL;
1949 /* allocate memory for the queue structure */
1950 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1953 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1958 q->nb_desc = nb_desc;
1959 q->port_id = dev->data->port_id;
1960 q->queue_id = queue_id;
1961 q->txq_flags = conf->txq_flags;
1962 q->ops = &def_txq_ops;
1963 q->tail_ptr = (volatile uint32_t *)
1964 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1965 if (handle_txconf(q, conf))
1968 /* allocate memory for the software ring */
1969 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1970 nb_desc * sizeof(struct rte_mbuf *),
1971 RTE_CACHE_LINE_SIZE, socket_id);
1972 if (q->sw_ring == NULL) {
1973 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1979 * allocate memory for the hardware descriptor ring. A memzone large
1980 * enough to hold the maximum ring size is requested to allow for
1981 * resizing in later calls to the queue setup function.
1983 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
1984 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
1987 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1988 rte_free(q->sw_ring);
1992 q->hw_ring = mz->addr;
1993 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1996 * allocate memory for the RS bit tracker. Enough slots to hold the
1997 * descriptor index for each RS bit needing to be set are required.
1999 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2000 ((nb_desc + 1) / q->rs_thresh) *
2002 RTE_CACHE_LINE_SIZE, socket_id);
2003 if (q->rs_tracker.list == NULL) {
2004 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2005 rte_free(q->sw_ring);
2010 dev->data->tx_queues[queue_id] = q;
2015 fm10k_tx_queue_release(void *queue)
2017 struct fm10k_tx_queue *q = queue;
2018 PMD_INIT_FUNC_TRACE();
2024 fm10k_reta_update(struct rte_eth_dev *dev,
2025 struct rte_eth_rss_reta_entry64 *reta_conf,
2028 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2029 uint16_t i, j, idx, shift;
2033 PMD_INIT_FUNC_TRACE();
2035 if (reta_size > FM10K_MAX_RSS_INDICES) {
2036 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2037 "(%d) doesn't match the number hardware can supported "
2038 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2043 * Update Redirection Table RETA[n], n=0..31. The redirection table has
2044 * 128 entries in 32 registers
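/* Worked example (illustrative, assuming RTE_RETA_GROUP_SIZE of 64): for
 * i = 68, idx = 68 / RTE_RETA_GROUP_SIZE = 1 and shift = 4, so the 4-bit
 * mask comes from bits 4-7 of reta_conf[1].mask and the four entries land
 * in register FM10K_RETA(0, 17).
 */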
2046 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2047 idx = i / RTE_RETA_GROUP_SIZE;
2048 shift = i % RTE_RETA_GROUP_SIZE;
2049 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2050 BIT_MASK_PER_UINT32);
2055 if (mask != BIT_MASK_PER_UINT32)
2056 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2058 for (j = 0; j < CHARS_PER_UINT32; j++) {
2059 if (mask & (0x1 << j)) {
2061 reta &= ~(UINT8_MAX << CHAR_BIT * j);
2062 reta |= reta_conf[idx].reta[shift + j] <<
2066 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2073 fm10k_reta_query(struct rte_eth_dev *dev,
2074 struct rte_eth_rss_reta_entry64 *reta_conf,
2077 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2078 uint16_t i, j, idx, shift;
2082 PMD_INIT_FUNC_TRACE();
2084 if (reta_size < FM10K_MAX_RSS_INDICES) {
2085 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2086 "(%d) doesn't match the number hardware can supported "
2087 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2092 * Read Redirection Table RETA[n], n=0..31. The redirection table has
2093 * 128 entries in 32 registers
2095 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2096 idx = i / RTE_RETA_GROUP_SIZE;
2097 shift = i % RTE_RETA_GROUP_SIZE;
2098 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2099 BIT_MASK_PER_UINT32);
2103 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2104 for (j = 0; j < CHARS_PER_UINT32; j++) {
2105 if (mask & (0x1 << j))
2106 reta_conf[idx].reta[shift + j] = ((reta >>
2107 CHAR_BIT * j) & UINT8_MAX);
2115 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2116 struct rte_eth_rss_conf *rss_conf)
2118 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2119 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2121 uint64_t hf = rss_conf->rss_hf;
2124 PMD_INIT_FUNC_TRACE();
2126 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2127 FM10K_RSSRK_ENTRIES_PER_REG)
2134 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
2135 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
2136 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
2137 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
2138 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
2139 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
2140 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
2141 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
2142 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
2144 /* If the mapping doesn't match any supported hash type, return */
2149 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2150 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2152 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2158 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2159 struct rte_eth_rss_conf *rss_conf)
2161 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2162 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2167 PMD_INIT_FUNC_TRACE();
2169 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2170 FM10K_RSSRK_ENTRIES_PER_REG)
2174 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2175 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2177 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2179 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
2180 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
2181 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
2182 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
2183 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
2184 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
2185 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
2186 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
2187 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
2189 rss_conf->rss_hf = hf;
2195 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2197 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2198 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2200 /* Bind all local non-queue interrupts to vector 0 */
2201 int_map |= FM10K_MISC_VEC_ID;
2203 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2204 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2205 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2206 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2207 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2208 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2210 /* Enable misc causes */
2211 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2212 FM10K_EIMR_ENABLE(THI_FAULT) |
2213 FM10K_EIMR_ENABLE(FUM_FAULT) |
2214 FM10K_EIMR_ENABLE(MAILBOX) |
2215 FM10K_EIMR_ENABLE(SWITCHREADY) |
2216 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2217 FM10K_EIMR_ENABLE(SRAMERROR) |
2218 FM10K_EIMR_ENABLE(VFLR));
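/* Enable ITR 0 (misc vector): set auto-mask and clear the pending mask bit */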
2221 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2222 FM10K_ITR_MASK_CLEAR);
2223 FM10K_WRITE_FLUSH(hw);
2227 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2229 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2230 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2232 int_map |= FM10K_MISC_VEC_ID;
2234 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2235 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2236 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2237 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2238 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2239 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2241 /* Disable misc causes */
2242 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2243 FM10K_EIMR_DISABLE(THI_FAULT) |
2244 FM10K_EIMR_DISABLE(FUM_FAULT) |
2245 FM10K_EIMR_DISABLE(MAILBOX) |
2246 FM10K_EIMR_DISABLE(SWITCHREADY) |
2247 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2248 FM10K_EIMR_DISABLE(SRAMERROR) |
2249 FM10K_EIMR_DISABLE(VFLR));
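/* Mask ITR 0 so no further misc interrupts are delivered */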
2252 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2253 FM10K_WRITE_FLUSH(hw);
2257 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2259 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2260 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2262 /* Bind all local non-queue interrupts to vector 0 */
2263 int_map |= FM10K_MISC_VEC_ID;
2265 /* Only INT 0 is available; the other 15 are reserved. */
2266 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2269 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2270 FM10K_ITR_MASK_CLEAR);
2271 FM10K_WRITE_FLUSH(hw);
2275 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2277 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2278 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2280 int_map |= FM10K_MISC_VEC_ID;
2282 /* Only INT 0 is available; the other 15 are reserved. */
2283 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2286 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2287 FM10K_WRITE_FLUSH(hw);
2291 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2293 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2296 if (hw->mac.type == fm10k_mac_pf)
2297 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
2298 FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2300 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
2301 FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2302 rte_intr_enable(&dev->pci_dev->intr_handle);
2307 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2309 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2312 if (hw->mac.type == fm10k_mac_pf)
2313 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
2314 FM10K_ITR_MASK_SET);
2316 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
2317 FM10K_ITR_MASK_SET);
2322 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2324 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2325 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
2326 uint32_t intr_vector, vec;
2330 /* fm10k needs one separate interrupt for the mailbox,
2331 * so only drivers that support multiple interrupt vectors
2332 * (e.g. vfio-pci) can work in fm10k interrupt mode
2334 if (!rte_intr_cap_multiple(intr_handle) ||
2335 dev->data->dev_conf.intr_conf.rxq == 0)
2338 intr_vector = dev->data->nb_rx_queues;
2340 /* disable interrupt first */
2341 rte_intr_disable(&dev->pci_dev->intr_handle);
2342 if (hw->mac.type == fm10k_mac_pf)
2343 fm10k_dev_disable_intr_pf(dev);
2345 fm10k_dev_disable_intr_vf(dev);
2347 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2348 PMD_INIT_LOG(ERR, "Failed to init event fd");
2352 if (rte_intr_dp_is_en(intr_handle) && !result) {
2353 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2354 dev->data->nb_rx_queues * sizeof(int), 0);
2355 if (intr_handle->intr_vec) {
2356 for (queue_id = 0, vec = FM10K_RX_VEC_START;
2357 queue_id < dev->data->nb_rx_queues;
2359 intr_handle->intr_vec[queue_id] = vec;
2360 if (vec < intr_handle->nb_efd - 1
2361 + FM10K_RX_VEC_START)
2365 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2366 " intr_vec", dev->data->nb_rx_queues);
2367 rte_intr_efd_disable(intr_handle);
2372 if (hw->mac.type == fm10k_mac_pf)
2373 fm10k_dev_enable_intr_pf(dev);
2375 fm10k_dev_enable_intr_vf(dev);
2376 rte_intr_enable(&dev->pci_dev->intr_handle);
2377 hw->mac.ops.update_int_moderator(hw);
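/*
 * Resulting mapping (derived from the loop above): RX queue q is bound to
 * vector FM10K_RX_VEC_START + q until the available event fds are exhausted,
 * after which the remaining queues share the last vector. Q2V() reads this
 * table back for the per-queue ITR writes in the rx_queue_intr_enable/disable
 * callbacks.
 */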
2382 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2384 struct fm10k_fault fault;
2386 const char *estr = "Unknown error";
2388 /* Process PCA fault */
2389 if (eicr & FM10K_EICR_PCA_FAULT) {
2390 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2393 switch (fault.type) {
2395 estr = "PCA_NO_FAULT"; break;
2396 case PCA_UNMAPPED_ADDR:
2397 estr = "PCA_UNMAPPED_ADDR"; break;
2398 case PCA_BAD_QACCESS_PF:
2399 estr = "PCA_BAD_QACCESS_PF"; break;
2400 case PCA_BAD_QACCESS_VF:
2401 estr = "PCA_BAD_QACCESS_VF"; break;
2402 case PCA_MALICIOUS_REQ:
2403 estr = "PCA_MALICIOUS_REQ"; break;
2404 case PCA_POISONED_TLP:
2405 estr = "PCA_POISONED_TLP"; break;
2407 estr = "PCA_TLP_ABORT"; break;
2411 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2412 estr, fault.func ? "VF" : "PF", fault.func,
2413 fault.address, fault.specinfo);
2416 /* Process THI fault */
2417 if (eicr & FM10K_EICR_THI_FAULT) {
2418 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2421 switch (fault.type) {
2423 estr = "THI_NO_FAULT"; break;
2424 case THI_MAL_DIS_Q_FAULT:
2425 estr = "THI_MAL_DIS_Q_FAULT"; break;
2429 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2430 estr, fault.func ? "VF" : "PF", fault.func,
2431 fault.address, fault.specinfo);
2434 /* Process FUM fault */
2435 if (eicr & FM10K_EICR_FUM_FAULT) {
2436 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2439 switch (fault.type) {
2441 estr = "FUM_NO_FAULT"; break;
2442 case FUM_UNMAPPED_ADDR:
2443 estr = "FUM_UNMAPPED_ADDR"; break;
2444 case FUM_POISONED_TLP:
2445 estr = "FUM_POISONED_TLP"; break;
2446 case FUM_BAD_VF_QACCESS:
2447 estr = "FUM_BAD_VF_QACCESS"; break;
2448 case FUM_ADD_DECODE_ERR:
2449 estr = "FUM_ADD_DECODE_ERR"; break;
2451 estr = "FUM_RO_ERROR"; break;
2452 case FUM_QPRC_CRC_ERROR:
2453 estr = "FUM_QPRC_CRC_ERROR"; break;
2454 case FUM_CSR_TIMEOUT:
2455 estr = "FUM_CSR_TIMEOUT"; break;
2456 case FUM_INVALID_TYPE:
2457 estr = "FUM_INVALID_TYPE"; break;
2458 case FUM_INVALID_LENGTH:
2459 estr = "FUM_INVALID_LENGTH"; break;
2460 case FUM_INVALID_BE:
2461 estr = "FUM_INVALID_BE"; break;
2462 case FUM_INVALID_ALIGN:
2463 estr = "FUM_INVALID_ALIGN"; break;
2467 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2468 estr, fault.func ? "VF" : "PF", fault.func,
2469 fault.address, fault.specinfo);
2474 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2479 * PF interrupt handler triggered by the NIC to handle a specific interrupt.
2482 * Pointer to interrupt handle.
2484 * The address of the parameter (struct rte_eth_dev *) registered before.
2490 fm10k_dev_interrupt_handler_pf(
2491 __rte_unused struct rte_intr_handle *handle,
2494 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2495 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2496 uint32_t cause, status;
2498 if (hw->mac.type != fm10k_mac_pf)
2501 cause = FM10K_READ_REG(hw, FM10K_EICR);
2503 /* Handle PCI fault cases */
2504 if (cause & FM10K_EICR_FAULT_MASK) {
2505 PMD_INIT_LOG(ERR, "INT: fault detected!");
2506 fm10k_dev_handle_fault(hw, cause);
2509 /* Handle switch up/down */
2510 if (cause & FM10K_EICR_SWITCHNOTREADY)
2511 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2513 if (cause & FM10K_EICR_SWITCHREADY)
2514 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2516 /* Handle mailbox message */
2518 hw->mbx.ops.process(hw, &hw->mbx);
2519 fm10k_mbx_unlock(hw);
2521 /* Handle SRAM error */
2522 if (cause & FM10K_EICR_SRAMERROR) {
2523 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2525 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2526 /* Write to clear pending bits */
2527 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2529 /* TODO: print out error message after shared code updates */
2532 /* Clear these 3 events if any are pending */
2533 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2534 FM10K_EICR_SWITCHREADY;
2536 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2538 /* Re-enable interrupt from device side */
2539 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2540 FM10K_ITR_MASK_CLEAR);
2541 /* Re-enable interrupt from host side */
2542 rte_intr_enable(&(dev->pci_dev->intr_handle));
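/*
 * Note: the ITR(0) write above re-arms the device-side interrupt (it was
 * auto-masked when the interrupt fired), while rte_intr_enable() re-arms the
 * host side (uio/vfio).
 */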
2546 * VF interrupt handler triggered by the NIC to handle a specific interrupt.
2549 * Pointer to interrupt handle.
2551 * The address of the parameter (struct rte_eth_dev *) registered before.
2557 fm10k_dev_interrupt_handler_vf(
2558 __rte_unused struct rte_intr_handle *handle,
2561 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2562 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2564 if (hw->mac.type != fm10k_mac_vf)
2567 /* Handle mailbox message if lock is acquired */
2569 hw->mbx.ops.process(hw, &hw->mbx);
2570 fm10k_mbx_unlock(hw);
2572 /* Re-enable interrupt from device side */
2573 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2574 FM10K_ITR_MASK_CLEAR);
2575 /* Re-enable interrupt from host side */
2576 rte_intr_enable(&(dev->pci_dev->intr_handle));
2579 /* Mailbox message handler in VF */
2580 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2581 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2582 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2583 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2584 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2588 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2592 /* Initialize mailbox lock */
2593 fm10k_mbx_initlock(hw);
2595 /* Replace default message handler with new ones */
2596 if (hw->mac.type == fm10k_mac_vf)
2597 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2600 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
2604 /* Connect to SM for PF device or PF for VF device */
2605 return hw->mbx.ops.connect(hw, &hw->mbx);
2609 fm10k_close_mbx_service(struct fm10k_hw *hw)
2611 /* Disconnect from SM for PF device or PF for VF device */
2612 hw->mbx.ops.disconnect(hw, &hw->mbx);
2615 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2616 .dev_configure = fm10k_dev_configure,
2617 .dev_start = fm10k_dev_start,
2618 .dev_stop = fm10k_dev_stop,
2619 .dev_close = fm10k_dev_close,
2620 .promiscuous_enable = fm10k_dev_promiscuous_enable,
2621 .promiscuous_disable = fm10k_dev_promiscuous_disable,
2622 .allmulticast_enable = fm10k_dev_allmulticast_enable,
2623 .allmulticast_disable = fm10k_dev_allmulticast_disable,
2624 .stats_get = fm10k_stats_get,
2625 .xstats_get = fm10k_xstats_get,
2626 .stats_reset = fm10k_stats_reset,
2627 .xstats_reset = fm10k_stats_reset,
2628 .link_update = fm10k_link_update,
2629 .dev_infos_get = fm10k_dev_infos_get,
2630 .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2631 .vlan_filter_set = fm10k_vlan_filter_set,
2632 .vlan_offload_set = fm10k_vlan_offload_set,
2633 .mac_addr_add = fm10k_macaddr_add,
2634 .mac_addr_remove = fm10k_macaddr_remove,
2635 .rx_queue_start = fm10k_dev_rx_queue_start,
2636 .rx_queue_stop = fm10k_dev_rx_queue_stop,
2637 .tx_queue_start = fm10k_dev_tx_queue_start,
2638 .tx_queue_stop = fm10k_dev_tx_queue_stop,
2639 .rx_queue_setup = fm10k_rx_queue_setup,
2640 .rx_queue_release = fm10k_rx_queue_release,
2641 .tx_queue_setup = fm10k_tx_queue_setup,
2642 .tx_queue_release = fm10k_tx_queue_release,
2643 .rx_descriptor_done = fm10k_dev_rx_descriptor_done,
2644 .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
2645 .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
2646 .reta_update = fm10k_reta_update,
2647 .reta_query = fm10k_reta_query,
2648 .rss_hash_update = fm10k_rss_hash_update,
2649 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
2652 static int ftag_check_handler(__rte_unused const char *key,
2653 const char *value, __rte_unused void *opaque)
2655 if (strcmp(value, "1"))
2662 fm10k_check_ftag(struct rte_devargs *devargs)
2664 struct rte_kvargs *kvlist;
2665 const char *ftag_key = "enable_ftag";
2667 if (devargs == NULL)
2670 kvlist = rte_kvargs_parse(devargs->args, NULL);
2674 if (!rte_kvargs_count(kvlist, ftag_key)) {
2675 rte_kvargs_free(kvlist);
2678 /* FTAG is enabled when there's key-value pair: enable_ftag=1 */
2679 if (rte_kvargs_process(kvlist, ftag_key,
2680 ftag_check_handler, NULL) < 0) {
2681 rte_kvargs_free(kvlist);
2684 rte_kvargs_free(kvlist);
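/*
 * Illustrative usage (not part of the driver): FTAG is requested per device
 * through EAL devargs, e.g. on the application command line
 *
 *	-w 0000:84:00.0,enable_ftag=1
 *
 * where the PCI address is only an example; any value other than "1" leaves
 * FTAG disabled.
 */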
2689 static void __attribute__((cold))
2690 fm10k_set_tx_function(struct rte_eth_dev *dev)
2692 struct fm10k_tx_queue *txq;
2695 uint16_t tx_ftag_en = 0;
2697 if (fm10k_check_ftag(dev->pci_dev->devargs))
2700 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2701 txq = dev->data->tx_queues[i];
2702 txq->tx_ftag_en = tx_ftag_en;
2703 /* Check whether the conditions for Vector Tx are satisfied */
2704 if (fm10k_tx_vec_condition_check(txq)) {
2711 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2712 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2713 txq = dev->data->tx_queues[i];
2714 fm10k_txq_vec_setup(txq);
2716 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2718 dev->tx_pkt_burst = fm10k_xmit_pkts;
2719 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2723 static void __attribute__((cold))
2724 fm10k_set_rx_function(struct rte_eth_dev *dev)
2726 struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2727 uint16_t i, rx_using_sse;
2728 uint16_t rx_ftag_en = 0;
2730 if (fm10k_check_ftag(dev->pci_dev->devargs))
2733 /* A few configuration conditions must be met
2734 * in order to allow Vector Rx.
2736 if (!fm10k_rx_vec_condition_check(dev) &&
2737 dev_info->rx_vec_allowed && !rx_ftag_en) {
2738 if (dev->data->scattered_rx)
2739 dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2741 dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2742 } else if (dev->data->scattered_rx)
2743 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2745 dev->rx_pkt_burst = fm10k_recv_pkts;
2748 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2749 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
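/*
 * rx_using_sse records whether one of the vector receive functions was
 * ultimately selected; it is copied into every RX queue below so the
 * queue-level code follows the same path.
 */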
2752 PMD_INIT_LOG(DEBUG, "Use vector Rx func");
2754 PMD_INIT_LOG(DEBUG, "Use regular Rx func");
2756 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2757 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2759 rxq->rx_using_sse = rx_using_sse;
2760 rxq->rx_ftag_en = rx_ftag_en;
2765 fm10k_params_init(struct rte_eth_dev *dev)
2767 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2768 struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2770 /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2771 * there is no way to get link status without reading BAR4. Until this
2772 * works, assume we have maximum bandwidth.
2773 * @todo - fix bus info
2775 hw->bus_caps.speed = fm10k_bus_speed_8000;
2776 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2777 hw->bus_caps.payload = fm10k_bus_payload_512;
2778 hw->bus.speed = fm10k_bus_speed_8000;
2779 hw->bus.width = fm10k_bus_width_pcie_x8;
2780 hw->bus.payload = fm10k_bus_payload_256;
2782 info->rx_vec_allowed = true;
2786 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2788 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2790 struct fm10k_macvlan_filter_info *macvlan;
2792 PMD_INIT_FUNC_TRACE();
2794 dev->dev_ops = &fm10k_eth_dev_ops;
2795 dev->rx_pkt_burst = &fm10k_recv_pkts;
2796 dev->tx_pkt_burst = &fm10k_xmit_pkts;
2798 /* only initialize in the primary process */
2799 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2802 rte_eth_copy_pci_info(dev, dev->pci_dev);
2804 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2805 memset(macvlan, 0, sizeof(*macvlan));
2806 /* Vendor and Device ID need to be set before init of shared code */
2807 memset(hw, 0, sizeof(*hw));
2808 hw->device_id = dev->pci_dev->id.device_id;
2809 hw->vendor_id = dev->pci_dev->id.vendor_id;
2810 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2811 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2812 hw->revision_id = 0;
2813 hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2814 if (hw->hw_addr == NULL) {
2815 PMD_INIT_LOG(ERR, "Bad mem resource."
2816 " Try to blacklist unused devices.");
2820 /* Store fm10k_adapter pointer */
2821 hw->back = dev->data->dev_private;
2823 /* Initialize the shared code */
2824 diag = fm10k_init_shared_code(hw);
2825 if (diag != FM10K_SUCCESS) {
2826 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2830 /* Initialize parameters */
2831 fm10k_params_init(dev);
2833 /* Initialize the hw */
2834 diag = fm10k_init_hw(hw);
2835 if (diag != FM10K_SUCCESS) {
2836 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2840 /* Initialize MAC address(es) */
2841 dev->data->mac_addrs = rte_zmalloc("fm10k",
2842 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2843 if (dev->data->mac_addrs == NULL) {
2844 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2848 diag = fm10k_read_mac_addr(hw);
2850 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2851 &dev->data->mac_addrs[0]);
2853 if (diag != FM10K_SUCCESS ||
2854 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2856 /* Generate a random addr */
2857 eth_random_addr(hw->mac.addr);
2858 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
2859 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2860 &dev->data->mac_addrs[0]);
2863 /* Reset the hw statistics */
2864 fm10k_stats_reset(dev);
2867 diag = fm10k_reset_hw(hw);
2868 if (diag != FM10K_SUCCESS) {
2869 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2873 /* Setup mailbox service */
2874 diag = fm10k_setup_mbx_service(hw);
2875 if (diag != FM10K_SUCCESS) {
2876 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2880 /* PF and VF have different interrupt handling mechanisms */
2881 if (hw->mac.type == fm10k_mac_pf) {
2882 /* register callback func to eal lib */
2883 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2884 fm10k_dev_interrupt_handler_pf, (void *)dev);
2886 /* enable MISC interrupt */
2887 fm10k_dev_enable_intr_pf(dev);
2889 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2890 fm10k_dev_interrupt_handler_vf, (void *)dev);
2892 fm10k_dev_enable_intr_vf(dev);
2895 /* Enable intr after callback registered */
2896 rte_intr_enable(&(dev->pci_dev->intr_handle));
2898 hw->mac.ops.update_int_moderator(hw);
2900 /* Make sure Switch Manager is ready before going forward. */
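/*
 * The PF polls get_host_state() up to MAX_QUERY_SWITCH_STATE_TIMES times,
 * sleeping WAIT_SWITCH_MSG_US between attempts, before giving up.
 */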
2901 if (hw->mac.type == fm10k_mac_pf) {
2902 int switch_ready = 0;
2904 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2906 hw->mac.ops.get_host_state(hw, &switch_ready);
2907 fm10k_mbx_unlock(hw);
2910 /* Delay some time to acquire async LPORT_MAP info. */
2911 rte_delay_us(WAIT_SWITCH_MSG_US);
2914 if (switch_ready == 0) {
2915 PMD_INIT_LOG(ERR, "switch is not ready");
2921 * The function below triggers operations on the mailbox, so acquire the
2922 * lock to avoid a race condition with the interrupt handler. Operations
2923 * on the mailbox FIFO trigger an interrupt to the PF/SM, whose interrupt
2924 * handler processes it and generates an interrupt back to our side.
2925 * Then the FIFO in the mailbox is touched again.
2928 /* Enable port first */
2929 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2932 /* Set unicast mode by default. App can change to another mode later via
2935 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2936 FM10K_XCAST_MODE_NONE);
2938 fm10k_mbx_unlock(hw);
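/*
 * The same lock/unlock bracket (fm10k_mbx_lock()/fm10k_mbx_unlock()) is used
 * around mailbox operations throughout this driver, including the PF/VF
 * interrupt handlers above.
 */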
2940 /* Make sure default VID is ready before going forward. */
2941 if (hw->mac.type == fm10k_mac_pf) {
2942 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2943 if (hw->mac.default_vid)
2945 /* Delay some time to acquire async port VLAN info. */
2946 rte_delay_us(WAIT_SWITCH_MSG_US);
2949 if (!hw->mac.default_vid) {
2950 PMD_INIT_LOG(ERR, "default VID is not ready");
2955 /* Add default mac address */
2956 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2957 MAIN_VSI_POOL_NUMBER);
2963 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
2965 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2967 PMD_INIT_FUNC_TRACE();
2969 /* only uninitialize in the primary process */
2970 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2973 /* safe to close dev here */
2974 fm10k_dev_close(dev);
2976 dev->dev_ops = NULL;
2977 dev->rx_pkt_burst = NULL;
2978 dev->tx_pkt_burst = NULL;
2980 /* disable uio/vfio intr */
2981 rte_intr_disable(&(dev->pci_dev->intr_handle));
2983 /* PF and VF have different interrupt handling mechanisms */
2984 if (hw->mac.type == fm10k_mac_pf) {
2985 /* disable interrupt */
2986 fm10k_dev_disable_intr_pf(dev);
2988 /* unregister callback func to eal lib */
2989 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2990 fm10k_dev_interrupt_handler_pf, (void *)dev);
2992 /* disable interrupt */
2993 fm10k_dev_disable_intr_vf(dev);
2995 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2996 fm10k_dev_interrupt_handler_vf, (void *)dev);
2999 /* free mac memory */
3000 if (dev->data->mac_addrs) {
3001 rte_free(dev->data->mac_addrs);
3002 dev->data->mac_addrs = NULL;
3005 memset(hw, 0, sizeof(*hw));
3011 * The set of PCI devices this driver supports. This driver will enable both PF
3012 * and SRIOV-VF devices.
3014 static const struct rte_pci_id pci_id_fm10k_map[] = {
3015 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
3016 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
3017 #include "rte_pci_dev_ids.h"
3018 { .vendor_id = 0, /* sentinel */ },
3021 static struct eth_driver rte_pmd_fm10k = {
3023 .name = "rte_pmd_fm10k",
3024 .id_table = pci_id_fm10k_map,
3025 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3026 RTE_PCI_DRV_DETACHABLE,
3028 .eth_dev_init = eth_fm10k_dev_init,
3029 .eth_dev_uninit = eth_fm10k_dev_uninit,
3030 .dev_private_size = sizeof(struct fm10k_adapter),
3034 * Driver initialization routine.
3035 * Invoked once at EAL init time.
3036 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
3039 rte_pmd_fm10k_init(__rte_unused const char *name,
3040 __rte_unused const char *params)
3042 PMD_INIT_FUNC_TRACE();
3043 rte_eth_driver_register(&rte_pmd_fm10k);
3047 static struct rte_driver rte_fm10k_driver = {
3049 .init = rte_pmd_fm10k_init,
3052 PMD_REGISTER_DRIVER(rte_fm10k_driver);