4 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_memzone.h>
37 #include <rte_string_fns.h>
39 #include <rte_spinlock.h>
42 #include "base/fm10k_api.h"
44 /* Default delay to acquire mailbox lock */
45 #define FM10K_MBXLOCK_DELAY_US 20
/* Mask selecting the low 32 bits of a 64-bit DMA address; used when
 * programming the TDBAL/RDBAL base-address-low registers below.
 */
46 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
/* Pool index used for the PF main VSI default MAC filter */
48 #define MAIN_VSI_POOL_NUMBER 0
50 /* Max try times to acquire switch status */
51 #define MAX_QUERY_SWITCH_STATE_TIMES 10
52 /* Wait interval to get switch status */
53 #define WAIT_SWITCH_MSG_US 100000
54 /* Number of chars per uint32 type */
55 #define CHARS_PER_UINT32 (sizeof(uint32_t))
/* Mask with the low CHARS_PER_UINT32 (= 4) bits set, i.e. 0xF */
56 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
58 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
59 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
60 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
61 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
62 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
63 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
65 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
66 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
67 const u8 *mac, bool add, uint32_t pool);
68 static void fm10k_tx_queue_release(void *queue);
69 static void fm10k_rx_queue_release(void *queue);
70 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
71 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
73 struct fm10k_xstats_name_off {
74 char name[RTE_ETH_XSTATS_NAME_SIZE];
78 struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
79 {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
80 {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
81 {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
82 {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
83 {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
84 {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
85 {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
86 {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
90 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
91 sizeof(fm10k_hw_stats_strings[0]))
93 struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
94 {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
95 {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
96 {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
99 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
100 sizeof(fm10k_hw_stats_rx_q_strings[0]))
102 struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
103 {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
104 {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
107 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
108 sizeof(fm10k_hw_stats_tx_q_strings[0]))
110 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
111 (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
114 fm10k_mbx_initlock(struct fm10k_hw *hw)
116 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
120 fm10k_mbx_lock(struct fm10k_hw *hw)
122 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
123 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
127 fm10k_mbx_unlock(struct fm10k_hw *hw)
129 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
132 /* Stubs needed for linkage when vPMD is disabled */
133 int __attribute__((weak))
134 fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
139 uint16_t __attribute__((weak))
141 __rte_unused void *rx_queue,
142 __rte_unused struct rte_mbuf **rx_pkts,
143 __rte_unused uint16_t nb_pkts)
148 uint16_t __attribute__((weak))
149 fm10k_recv_scattered_pkts_vec(
150 __rte_unused void *rx_queue,
151 __rte_unused struct rte_mbuf **rx_pkts,
152 __rte_unused uint16_t nb_pkts)
157 int __attribute__((weak))
158 fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
164 void __attribute__((weak))
165 fm10k_rx_queue_release_mbufs_vec(
166 __rte_unused struct fm10k_rx_queue *rxq)
171 void __attribute__((weak))
172 fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
177 int __attribute__((weak))
178 fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
183 uint16_t __attribute__((weak))
184 fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
185 __rte_unused struct rte_mbuf **tx_pkts,
186 __rte_unused uint16_t nb_pkts)
192 * reset queue to initial state, allocate software buffers used when starting
194 * return 0 on success
195 * return -ENOMEM if buffers cannot be allocated
196 * return -EINVAL if buffers do not satisfy alignment condition
199 rx_queue_reset(struct fm10k_rx_queue *q)
201 static const union fm10k_rx_desc zero = {{0} };
204 PMD_INIT_FUNC_TRACE();
/* Bulk-allocate one mbuf per descriptor from the queue's mempool */
206 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
210 for (i = 0; i < q->nb_desc; ++i) {
211 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
/* HW requires aligned buffer addresses; return all mbufs on violation */
212 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
213 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
217 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
/* The same DMA address is programmed for packet and header buffers */
218 q->hw_ring[i].q.pkt_addr = dma_addr;
219 q->hw_ring[i].q.hdr_addr = dma_addr;
222 /* initialize extra software ring entries. Space for these extra
223 * entries is always allocated.
225 memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
226 for (i = 0; i < q->nb_fake_desc; ++i) {
227 q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
228 q->hw_ring[q->nb_desc + i] = zero;
/* Arm the re-allocation trigger and publish the RX tail pointer to HW */
233 q->next_trigger = q->alloc_thresh - 1;
234 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
235 q->rxrearm_start = 0;
242 * clean queue, descriptor rings, free software buffers used when stopping
246 rx_queue_clean(struct fm10k_rx_queue *q)
248 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
250 PMD_INIT_FUNC_TRACE();
252 /* zero descriptor rings */
253 for (i = 0; i < q->nb_desc; ++i)
254 q->hw_ring[i] = zero;
256 /* zero faked descriptors */
257 for (i = 0; i < q->nb_fake_desc; ++i)
258 q->hw_ring[q->nb_desc + i] = zero;
260 /* vPMD driver has a different way of releasing mbufs. */
261 if (q->rx_using_sse) {
262 fm10k_rx_queue_release_mbufs_vec(q);
266 /* free software buffers; NULL slots are expected (already consumed) */
267 for (i = 0; i < q->nb_desc; ++i) {
269 rte_pktmbuf_free_seg(q->sw_ring[i]);
270 q->sw_ring[i] = NULL;
276 * free all queue memory used when releasing the queue (i.e. configure)
279 rx_queue_free(struct fm10k_rx_queue *q)
281 PMD_INIT_FUNC_TRACE();
283 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q)<
/* release the software ring; mbufs must have been freed by rx_queue_clean */
286 rte_free(q->sw_ring);
295 * disable RX queue, wait until HW finished necessary flush operation
298 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
/* clear the queue-enable bit, then poll until HW reports it cleared */
302 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
303 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
304 reg & ~FM10K_RXQCTL_ENABLE);
306 /* Wait 100us at most */
307 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
309 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
310 if (!(reg & FM10K_RXQCTL_ENABLE))
/* loop ran to completion => HW never deasserted ENABLE: timeout */
314 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
321 * reset queue to initial state, allocate software buffers used when starting
325 tx_queue_reset(struct fm10k_tx_queue *q)
327 PMD_INIT_FUNC_TRACE();
/* one descriptor is kept unused so head/tail never alias when full */
331 q->nb_free = q->nb_desc - 1;
332 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
/* reset the TX tail pointer in hardware */
333 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
337 * clean queue, descriptor rings, free software buffers used when stopping
341 tx_queue_clean(struct fm10k_tx_queue *q)
343 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
345 PMD_INIT_FUNC_TRACE();
347 /* zero descriptor rings */
348 for (i = 0; i < q->nb_desc; ++i)
349 q->hw_ring[i] = zero;
351 /* free software buffers; NULL slots are expected (already transmitted) */
352 for (i = 0; i < q->nb_desc; ++i) {
354 rte_pktmbuf_free_seg(q->sw_ring[i]);
355 q->sw_ring[i] = NULL;
361 * free all queue memory used when releasing the queue (i.e. configure)
364 tx_queue_free(struct fm10k_tx_queue *q)
366 PMD_INIT_FUNC_TRACE();
368 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
/* free the RS-bit tracker list first, then the software ring */
370 if (q->rs_tracker.list) {
371 rte_free(q->rs_tracker.list);
372 q->rs_tracker.list = NULL;
375 rte_free(q->sw_ring);
384 * disable TX queue, wait until HW finished necessary flush operation
387 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
/* clear the queue-enable bit, then poll until HW reports it cleared */
391 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
392 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
393 reg & ~FM10K_TXDCTL_ENABLE);
395 /* Wait 100us at most */
396 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
398 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
399 if (!(reg & FM10K_TXDCTL_ENABLE))
/* loop ran to completion => HW never deasserted ENABLE: timeout */
403 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
/* Validate the requested multi-queue RX mode: DCB is rejected, VMDQ is
 * PF-only, and the pool count must fit the pool bitmap and queue count.
 */
410 fm10k_check_mq_mode(struct rte_eth_dev *dev)
412 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
413 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
414 struct rte_eth_vmdq_rx_conf *vmdq_conf;
415 uint16_t nb_rx_q = dev->data->nb_rx_queues;
417 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
419 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
420 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
/* no VMDQ requested: nothing more to validate */
424 if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
427 if (hw->mac.type == fm10k_mac_vf) {
428 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
432 /* Check VMDQ queue pool number */
/* pools must fit inside the pool_map bitmap and not exceed RX queues */
433 if (vmdq_conf->nb_queue_pools >
434 sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
435 vmdq_conf->nb_queue_pools > nb_rx_q) {
436 PMD_INIT_LOG(ERR, "Too many of queue pools: %d",
437 vmdq_conf->nb_queue_pools);
/* Default TX queue operations table (scalar path reset hook). */
444 static const struct fm10k_txq_ops def_txq_ops = {
445 .reset = tx_queue_reset,
/* dev_ops.dev_configure: warn that CRC strip cannot be disabled and
 * validate the requested multi-queue mode.
 */
449 fm10k_dev_configure(struct rte_eth_dev *dev)
453 PMD_INIT_FUNC_TRACE();
/* the device always strips CRC regardless of the configuration */
455 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
456 PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
457 /* multipe queue mode checking */
458 ret = fm10k_check_mq_mode(dev);
460 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
468 /* fls = find last set bit = 32 minus the number of leading zeros */
/* NOTE: __builtin_clz(0) is undefined behavior, hence the explicit 0 case */
470 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
/* Program the VLAN table from the VMDQ pool map: each non-empty pool map
 * entry has its VLAN ID added via the mailbox (hence the lock around it).
 */
474 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
476 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
477 struct rte_eth_vmdq_rx_conf *vmdq_conf;
480 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
482 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
/* skip map entries with an empty pool bitmap */
483 if (!vmdq_conf->pool_map[i].pools)
486 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
487 fm10k_mbx_unlock(hw);
/* Re-install the default MAC address filter on the PF main VSI pool. */
492 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
494 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
496 /* Add default mac address */
497 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
498 MAIN_VSI_POOL_NUMBER);
/* Configure RSS: program the hash key (default Intel key or user key),
 * fill the redirection table round-robin over the RX queues, and enable
 * the hash functions requested in rss_hf via the MRQC register.
 */
502 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
504 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
505 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
506 uint32_t mrqc, *key, i, reta, j;
509 #define RSS_KEY_SIZE 40
510 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
511 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
512 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
513 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
514 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
515 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
/* RSS is pointless with one queue, wrong mq_mode, or empty hash mask */
518 if (dev->data->nb_rx_queues == 1 ||
519 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
520 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
523 /* random key is rss_intel_key (default) or user provided (rss_key) */
524 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
525 key = (uint32_t *)rss_intel_key;
527 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
529 /* Now fill our hash function seeds, 4 bytes at a time */
530 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
531 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
534 * Fill in redirection table
535 * The byte-swap is needed because NIC registers are in
536 * little-endian order.
/* j cycles through the RX queues; four one-byte entries per RETA word */
539 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
540 if (j == dev->data->nb_rx_queues)
542 reta = (reta << CHAR_BIT) | j;
544 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
549 * Generate RSS hash based on packet types, TCP/UDP
550 * port numbers and/or IPv4/v6 src and dst addresses
552 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
554 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
555 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
556 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
557 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
558 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
559 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
560 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
561 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
562 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
/* NOTE(review): format string is missing a space before "is not" —
 * message prints as "...<hex>is not..."; fix in a behavior change.
 */
565 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
570 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
/* Switch the device from nb_lport_old to nb_lport_new logical ports:
 * disable the old range, enable the new one, then set each new port to
 * the default (no-xcast) mode. All mailbox ops run under the mbx lock.
 */
574 fm10k_dev_logic_port_update(struct rte_eth_dev *dev,
575 uint16_t nb_lport_old, uint16_t nb_lport_new)
577 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
581 /* Disable previous logic ports */
583 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
584 nb_lport_old, false);
585 /* Enable new logic ports */
586 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
588 fm10k_mbx_unlock(hw);
590 for (i = 0; i < nb_lport_new; i++) {
591 /* Set unicast mode by default. App can change
592 * to other mode in other API func.
595 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
596 FM10K_XCAST_MODE_NONE);
597 fm10k_mbx_unlock(hw);
/* Configure multi-queue RX: always program RSS, then (PF only) update the
 * logical-port count and reset MAC/VLAN filters when the VMDQ pool count
 * changed.
 */
602 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
604 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
605 struct rte_eth_vmdq_rx_conf *vmdq_conf;
606 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
607 struct fm10k_macvlan_filter_info *macvlan;
608 uint16_t nb_queue_pools = 0; /* pool number in configuration */
609 uint16_t nb_lport_new, nb_lport_old;
611 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
612 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
614 fm10k_dev_rss_configure(dev);
616 /* only PF supports VMDQ */
617 if (hw->mac.type != fm10k_mac_pf)
620 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
621 nb_queue_pools = vmdq_conf->nb_queue_pools;
623 /* no pool number change, no need to update logic port and VLAN/MAC */
624 if (macvlan->nb_queue_pools == nb_queue_pools)
/* zero pools still needs one logical port for the PF itself */
627 nb_lport_old = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
628 nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
629 fm10k_dev_logic_port_update(dev, nb_lport_old, nb_lport_new);
631 /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
632 memset(dev->data->mac_addrs, 0,
633 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
634 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
635 &dev->data->mac_addrs[0]);
636 memset(macvlan, 0, sizeof(*macvlan));
637 macvlan->nb_queue_pools = nb_queue_pools;
/* re-install filters for the new layout (VMDQ pools or PF main VSI) */
640 fm10k_dev_vmdq_rx_configure(dev);
642 fm10k_dev_pf_main_vsi_reset(dev);
/* Initialize TX path: mask TX interrupts, program each queue's descriptor
 * ring base/size registers, and select the TX burst function.
 */
646 fm10k_dev_tx_init(struct rte_eth_dev *dev)
648 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
650 struct fm10k_tx_queue *txq;
654 /* Disable TXINT to avoid possible interrupt */
655 for (i = 0; i < hw->mac.max_queues; i++)
656 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
657 3 << FM10K_TXINT_TIMER_SHIFT);
660 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
661 txq = dev->data->tx_queues[i];
662 base_addr = txq->hw_ring_phys_addr;
663 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
665 /* disable queue to avoid issues while updating state */
666 ret = tx_queue_disable(hw, i);
668 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
672 /* set location and size for descriptor ring */
/* ring base is split into low/high 32-bit registers */
673 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
674 base_addr & UINT64_LOWER_32BITS_MASK);
675 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
676 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
677 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
680 /* set up vector or scalar TX function as appropriate */
681 fm10k_set_tx_function(dev);
/* Initialize RX path: mask RX interrupts, program each queue's descriptor
 * ring and buffer size, enable scatter when frames may exceed one buffer,
 * then configure VMDQ/RSS and select the RX burst function.
 */
687 fm10k_dev_rx_init(struct rte_eth_dev *dev)
689 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
691 struct fm10k_rx_queue *rxq;
694 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
697 /* Disable RXINT to avoid possible interrupt */
698 for (i = 0; i < hw->mac.max_queues; i++)
699 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
700 3 << FM10K_RXINT_TIMER_SHIFT);
702 /* Setup RX queues */
703 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
704 rxq = dev->data->rx_queues[i];
705 base_addr = rxq->hw_ring_phys_addr;
706 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
708 /* disable queue to avoid issues while updating state */
709 ret = rx_queue_disable(hw, i);
711 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
715 /* Setup the Base and Length of the Rx Descriptor Ring */
716 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
717 base_addr & UINT64_LOWER_32BITS_MASK);
718 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
719 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
720 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
722 /* Configure the Rx buffer size for one buff without split */
723 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
724 RTE_PKTMBUF_HEADROOM);
725 /* As RX buffer is aligned to 512B within mbuf, some bytes are
726 * reserved for this purpose, and the worst case could be 511B.
727 * But SRR reg assumes all buffers have the same size. In order
728 * to fill the gap, we'll have to consider the worst case and
729 * assume 512B is reserved. If we don't do so, it's possible
730 * for HW to overwrite data to next mbuf.
732 buf_size -= FM10K_RX_DATABUF_ALIGN;
734 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
735 buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
737 /* It adds dual VLAN length for supporting dual VLAN */
/* enable buffer chaining (scatter) when a max-size frame cannot fit
 * in a single buffer, or when the application requested scatter RX
 */
738 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
739 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
740 dev->data->dev_conf.rxmode.enable_scatter) {
742 dev->data->scattered_rx = 1;
743 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
744 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
745 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
748 /* Enable drop on empty, it's RO for VF */
749 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
750 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
752 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
753 FM10K_WRITE_FLUSH(hw);
756 /* Configure VMDQ/RSS if applicable */
757 fm10k_dev_mq_rx_configure(dev);
759 /* Decide the best RX function */
760 fm10k_set_rx_function(dev);
/* dev_ops.rx_queue_start: reset the queue's software state, program the
 * head/tail pointers and enable the queue in hardware.
 */
766 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
768 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
771 struct fm10k_rx_queue *rxq;
773 PMD_INIT_FUNC_TRACE();
775 if (rx_queue_id < dev->data->nb_rx_queues) {
776 rxq = dev->data->rx_queues[rx_queue_id];
777 err = rx_queue_reset(rxq);
778 if (err == -ENOMEM) {
779 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
781 } else if (err == -EINVAL) {
782 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
787 /* Setup the HW Rx Head and Tail Descriptor Pointers
788 * Note: this must be done AFTER the queue is enabled on real
789 * hardware, but BEFORE the queue is enabled when using the
790 * emulation platform. Do it in both places for now and remove
791 * this comment and the following two register writes when the
792 * emulation platform is no longer being used.
794 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
795 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
797 /* Set PF ownership flag for PF devices */
798 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
799 if (hw->mac.type == fm10k_mac_pf)
800 reg |= FM10K_RXQCTL_PF;
801 reg |= FM10K_RXQCTL_ENABLE;
802 /* enable RX queue */
803 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
804 FM10K_WRITE_FLUSH(hw);
806 /* Setup the HW Rx Head and Tail Descriptor Pointers
807 * Note: this must be done AFTER the queue is enabled
809 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
810 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
/* report the started state back to the ethdev layer */
811 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
/* dev_ops.rx_queue_stop: disable the queue in HW, release its mbufs and
 * clear the descriptor rings.
 */
818 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
820 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
822 PMD_INIT_FUNC_TRACE();
824 if (rx_queue_id < dev->data->nb_rx_queues) {
825 /* Disable RX queue */
826 rx_queue_disable(hw, rx_queue_id);
828 /* Free mbuf and clean HW ring */
829 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
830 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
/* dev_ops.tx_queue_start: reset queue state via its ops table, zero the
 * HW head/tail pointers and enable the queue.
 */
837 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
839 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
840 /** @todo - this should be defined in the shared code */
841 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
842 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
845 PMD_INIT_FUNC_TRACE();
847 if (tx_queue_id < dev->data->nb_tx_queues) {
848 struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
852 /* reset head and tail pointers */
853 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
854 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
856 /* enable TX queue */
857 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
858 FM10K_TXDCTL_ENABLE | txdctl);
859 FM10K_WRITE_FLUSH(hw);
860 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
/* dev_ops.tx_queue_stop: disable the queue in HW and free queued mbufs. */
868 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
870 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
872 PMD_INIT_FUNC_TRACE();
874 if (tx_queue_id < dev->data->nb_tx_queues) {
875 tx_queue_disable(hw, tx_queue_id);
876 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
877 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
883 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
885 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
886 != FM10K_DGLORTMAP_NONE);
/* dev_ops.promiscuous_enable: switch the port's xcast mode to PROMISC via
 * the mailbox. No-op until a valid glort range has been assigned.
 */
890 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
892 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
895 PMD_INIT_FUNC_TRACE();
897 /* Return if it didn't acquire valid glort range */
898 if (!fm10k_glort_valid(hw))
902 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
903 FM10K_XCAST_MODE_PROMISC);
904 fm10k_mbx_unlock(hw);
906 if (status != FM10K_SUCCESS)
907 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
/* dev_ops.promiscuous_disable: fall back to ALLMULTI if that is still
 * enabled, otherwise to NONE (unicast only).
 */
911 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
913 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
917 PMD_INIT_FUNC_TRACE();
919 /* Return if it didn't acquire valid glort range */
920 if (!fm10k_glort_valid(hw))
923 if (dev->data->all_multicast == 1)
924 mode = FM10K_XCAST_MODE_ALLMULTI;
926 mode = FM10K_XCAST_MODE_NONE;
929 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
931 fm10k_mbx_unlock(hw);
933 if (status != FM10K_SUCCESS)
934 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
/* dev_ops.allmulticast_enable: switch the port's xcast mode to ALLMULTI,
 * unless promiscuous mode is already on (which is a superset).
 */
938 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
940 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
943 PMD_INIT_FUNC_TRACE();
945 /* Return if it didn't acquire valid glort range */
946 if (!fm10k_glort_valid(hw))
949 /* If promiscuous mode is enabled, it doesn't make sense to enable
950 * allmulticast and disable promiscuous since fm10k only can select
953 if (dev->data->promiscuous) {
954 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
955 "needn't enable allmulticast");
960 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
961 FM10K_XCAST_MODE_ALLMULTI);
962 fm10k_mbx_unlock(hw);
964 if (status != FM10K_SUCCESS)
965 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
/* dev_ops.allmulticast_disable: return the port to unicast-only mode;
 * refused while promiscuous mode is active (single-mode hardware).
 */
969 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
971 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
974 PMD_INIT_FUNC_TRACE();
976 /* Return if it didn't acquire valid glort range */
977 if (!fm10k_glort_valid(hw))
980 if (dev->data->promiscuous) {
981 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
982 "since promisc mode is enabled");
987 /* Change mode to unicast mode */
988 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
989 FM10K_XCAST_MODE_NONE);
990 fm10k_mbx_unlock(hw);
992 if (status != FM10K_SUCCESS)
993 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
/* Program the DGLORT decoder: MAP 0 matches everything, its decoder splits
 * glort bits into an RSS-queue field and a VMDQ-pool field (widths from
 * fls() of the respective counts), and all other maps are invalidated.
 */
997 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
999 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1000 uint32_t dglortdec, pool_len, rss_len, i;
1001 uint16_t nb_queue_pools;
1002 struct fm10k_macvlan_filter_info *macvlan;
1004 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1005 nb_queue_pools = macvlan->nb_queue_pools;
/* bit-widths needed to address all pools / all RSS queues */
1006 pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
1007 rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
1008 dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
1010 /* Establish only MAP 0 as valid */
1011 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
1013 /* Configure VMDQ/RSS DGlort Decoder */
1014 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
1016 /* Invalidate all other GLORT entries */
1017 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
1018 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1019 FM10K_DGLORTMAP_NONE);
/* Round-up amount so RX buffer sizes align to the SRRCTL BSIZEPKT unit */
1022 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
/* dev_ops.dev_start: stop/init/start the hardware, initialize the TX and
 * RX paths, configure the DGLORT decoder (PF only), start every
 * non-deferred queue (rolling back already-started queues on failure),
 * and finally install the default VLAN when not in VMDQ mode.
 */
1024 fm10k_dev_start(struct rte_eth_dev *dev)
1026 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1029 PMD_INIT_FUNC_TRACE();
1031 /* stop, init, then start the hw */
1032 diag = fm10k_stop_hw(hw);
1033 if (diag != FM10K_SUCCESS) {
1034 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1038 diag = fm10k_init_hw(hw);
1039 if (diag != FM10K_SUCCESS) {
1040 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1044 diag = fm10k_start_hw(hw);
1045 if (diag != FM10K_SUCCESS) {
1046 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1050 diag = fm10k_dev_tx_init(dev);
1052 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1056 diag = fm10k_dev_rx_init(dev);
1058 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
/* DGLORT decoding is a PF-only resource */
1062 if (hw->mac.type == fm10k_mac_pf)
1063 fm10k_dev_dglort_map_configure(dev);
1065 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1066 struct fm10k_rx_queue *rxq;
1067 rxq = dev->data->rx_queues[i];
/* deferred-start queues are left for the app to start explicitly */
1069 if (rxq->rx_deferred_start)
1071 diag = fm10k_dev_rx_queue_start(dev, i);
/* roll back: clean every RX queue started so far */
1074 for (j = 0; j < i; ++j)
1075 rx_queue_clean(dev->data->rx_queues[j]);
1080 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1081 struct fm10k_tx_queue *txq;
1082 txq = dev->data->tx_queues[i];
1084 if (txq->tx_deferred_start)
1086 diag = fm10k_dev_tx_queue_start(dev, i);
/* roll back: clean started TX queues and all RX queues */
1089 for (j = 0; j < i; ++j)
1090 tx_queue_clean(dev->data->tx_queues[j]);
1091 for (j = 0; j < dev->data->nb_rx_queues; ++j)
1092 rx_queue_clean(dev->data->rx_queues[j]);
1097 /* Update default vlan when not in VMDQ mode */
1098 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1099 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
/* dev_ops.dev_stop: stop every TX then every RX queue (queue arrays may
 * be NULL if configure was never completed).
 */
1105 fm10k_dev_stop(struct rte_eth_dev *dev)
1109 PMD_INIT_FUNC_TRACE();
1111 if (dev->data->tx_queues)
1112 for (i = 0; i < dev->data->nb_tx_queues; i++)
1113 fm10k_dev_tx_queue_stop(dev, i);
1115 if (dev->data->rx_queues)
1116 for (i = 0; i < dev->data->nb_rx_queues; i++)
1117 fm10k_dev_rx_queue_stop(dev, i);
/* Release all TX and RX queue resources owned by the device. */
1121 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1125 PMD_INIT_FUNC_TRACE();
1127 if (dev->data->tx_queues) {
1128 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1129 struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1135 if (dev->data->rx_queues) {
1136 for (i = 0; i < dev->data->nb_rx_queues; i++)
1137 fm10k_rx_queue_release(dev->data->rx_queues[i]);
/* dev_ops.dev_close: disable the device's logical ports via the mailbox,
 * shut down the mailbox service, then stop the device and free its queues.
 */
1142 fm10k_dev_close(struct rte_eth_dev *dev)
1144 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1146 struct fm10k_macvlan_filter_info *macvlan;
1148 PMD_INIT_FUNC_TRACE();
1150 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
/* zero pools still means one logical port for the PF itself */
1151 nb_lport = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
1153 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1155 fm10k_mbx_unlock(hw);
1157 /* Stop mailbox service first */
1158 fm10k_close_mbx_service(hw);
1159 fm10k_dev_stop(dev);
1160 fm10k_dev_queue_release(dev);
1165 fm10k_link_update(struct rte_eth_dev *dev,
1166 __rte_unused int wait_to_complete)
1168 PMD_INIT_FUNC_TRACE();
1170 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
1171 * x8 PCIe interface. For now, we leave the speed undefined since there
1172 * is no 50Gbps Ethernet. */
1173 dev->data->dev_link.link_speed = 0;
1174 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1175 dev->data->dev_link.link_status = 1;
1181 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
1184 struct fm10k_hw_stats *hw_stats =
1185 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1186 unsigned i, q, count = 0;
1188 if (n < FM10K_NB_XSTATS)
1189 return FM10K_NB_XSTATS;
1192 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1193 snprintf(xstats[count].name, sizeof(xstats[count].name),
1194 "%s", fm10k_hw_stats_strings[count].name);
1195 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1196 fm10k_hw_stats_strings[count].offset);
1200 /* PF queue stats */
1201 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1202 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1203 snprintf(xstats[count].name, sizeof(xstats[count].name),
1205 fm10k_hw_stats_rx_q_strings[i].name);
1206 xstats[count].value =
1207 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1208 fm10k_hw_stats_rx_q_strings[i].offset);
1211 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1212 snprintf(xstats[count].name, sizeof(xstats[count].name),
1214 fm10k_hw_stats_tx_q_strings[i].name);
1215 xstats[count].value =
1216 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1217 fm10k_hw_stats_tx_q_strings[i].offset);
1222 return FM10K_NB_XSTATS;
/* dev_ops.stats_get: refresh the hardware counters, copy the per-queue
 * values (bounded by RTE_ETHDEV_QUEUE_STAT_CNTRS) and derive the port
 * totals by summing the queue counters.
 */
1226 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1228 uint64_t ipackets, opackets, ibytes, obytes;
1229 struct fm10k_hw *hw =
1230 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1231 struct fm10k_hw_stats *hw_stats =
1232 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1235 PMD_INIT_FUNC_TRACE();
1237 fm10k_update_hw_stats(hw, hw_stats);
1239 ipackets = opackets = ibytes = obytes = 0;
1240 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1241 (i < hw->mac.max_queues); ++i) {
1242 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1243 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1244 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
1245 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
1246 ipackets += stats->q_ipackets[i];
1247 opackets += stats->q_opackets[i];
1248 ibytes += stats->q_ibytes[i];
1249 obytes += stats->q_obytes[i];
1251 stats->ipackets = ipackets;
1252 stats->opackets = opackets;
1253 stats->ibytes = ibytes;
1254 stats->obytes = obytes;
/* Reset statistics: zero the software copy of the HW counters and
 * re-bind them so subsequent reads start from the current HW baseline. */
1258 fm10k_stats_reset(struct rte_eth_dev *dev)
1260 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1261 struct fm10k_hw_stats *hw_stats =
1262 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1264 PMD_INIT_FUNC_TRACE();
1266 memset(hw_stats, 0, sizeof(*hw_stats));
1267 fm10k_rebind_hw_stats(hw, hw_stats);
/* Report static device capabilities and defaults to the ethdev layer:
 * queue counts, MTU/buffer limits, offload capability flags, RSS key and
 * RETA sizes, default Rx/Tx thresholds and descriptor-count limits. */
1271 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1272 struct rte_eth_dev_info *dev_info)
1274 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1276 PMD_INIT_FUNC_TRACE();
1278 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
1279 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
1280 dev_info->max_rx_queues = hw->mac.max_queues;
1281 dev_info->max_tx_queues = hw->mac.max_queues;
1282 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
1283 dev_info->max_hash_mac_addrs = 0;
1284 dev_info->max_vfs = dev->pci_dev->max_vfs;
1285 dev_info->vmdq_pool_base = 0;
1286 dev_info->vmdq_queue_base = 0;
1287 dev_info->max_vmdq_pools = ETH_32_POOLS;
1288 dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
/* Checksum offloads plus VLAN strip (Rx) / insert + TSO (Tx). */
1289 dev_info->rx_offload_capa =
1290 DEV_RX_OFFLOAD_VLAN_STRIP |
1291 DEV_RX_OFFLOAD_IPV4_CKSUM |
1292 DEV_RX_OFFLOAD_UDP_CKSUM |
1293 DEV_RX_OFFLOAD_TCP_CKSUM;
1294 dev_info->tx_offload_capa =
1295 DEV_TX_OFFLOAD_VLAN_INSERT |
1296 DEV_TX_OFFLOAD_IPV4_CKSUM |
1297 DEV_TX_OFFLOAD_UDP_CKSUM |
1298 DEV_TX_OFFLOAD_TCP_CKSUM |
1299 DEV_TX_OFFLOAD_TCP_TSO;
1301 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1302 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
/* Defaults used when the application passes NULL queue configs. */
1304 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1306 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1307 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1308 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1310 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1314 dev_info->default_txconf = (struct rte_eth_txconf) {
1316 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1317 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1318 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1320 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1321 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1322 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1325 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1326 .nb_max = FM10K_MAX_RX_DESC,
1327 .nb_min = FM10K_MIN_RX_DESC,
1328 .nb_align = FM10K_MULT_RX_DESC,
1331 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1332 .nb_max = FM10K_MAX_TX_DESC,
1333 .nb_min = FM10K_MIN_TX_DESC,
1334 .nb_align = FM10K_MULT_TX_DESC,
/* Add (on != 0) or remove a VLAN ID from the HW VLAN filter table, then
 * refresh the unicast MAC filter entries so they follow the new VLAN
 * membership. Rejected in VMDQ mode and for vlan_id > ETH_VLAN_ID_MAX.
 * NOTE(review): sampled listing — return statements and some loop lines
 * between the visible lines are not shown here. */
1339 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1342 uint16_t mac_num = 0;
1343 uint32_t vid_idx, vid_bit, mac_index;
1344 struct fm10k_hw *hw;
1345 struct fm10k_macvlan_filter_info *macvlan;
1346 struct rte_eth_dev_data *data = dev->data;
1348 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1349 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1351 if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1352 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1356 if (vlan_id > ETH_VLAN_ID_MAX) {
1357 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
/* vfta is a bitmap: vid_idx selects the 32-bit word, vid_bit the bit. */
1361 vid_idx = FM10K_VFTA_IDX(vlan_id);
1362 vid_bit = FM10K_VFTA_BIT(vlan_id);
1363 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1364 if (on && (macvlan->vfta[vid_idx] & vid_bit))
1366 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1367 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1368 PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
1369 "in the VLAN filter table");
/* Mailbox lock is taken on the missing line above this call —
 * presumably fm10k_mbx_lock(hw); verify against full source. */
1374 result = fm10k_update_vlan(hw, vlan_id, 0, on);
1375 fm10k_mbx_unlock(hw);
1376 if (result != FM10K_SUCCESS) {
1377 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
/* Walk the configured MAC addresses and re-program each one with the
 * updated VLAN association; stop early on any failure. */
1381 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1382 (result == FM10K_SUCCESS); mac_index++) {
1383 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1385 if (mac_num > macvlan->mac_num - 1) {
1386 PMD_INIT_LOG(ERR, "MAC address number "
1391 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1392 data->mac_addrs[mac_index].addr_bytes,
1394 fm10k_mbx_unlock(hw);
1397 if (result != FM10K_SUCCESS) {
1398 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
/* Keep the software shadow of the VLAN table in sync. */
1403 macvlan->vlan_num++;
1404 macvlan->vfta[vid_idx] |= vid_bit;
1406 macvlan->vlan_num--;
1407 macvlan->vfta[vid_idx] &= ~vid_bit;
/* VLAN offload configuration callback. fm10k hardware has fixed VLAN
 * behavior (strip always on, filter always on, no QinQ), so this only
 * logs an error when the requested config conflicts with the hardware.
 * NOTE(review): 'dev' is marked __rte_unused but is dereferenced below;
 * the attribute is merely "may be unused" so this is harmless. */
1413 fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
1415 if (mask & ETH_VLAN_STRIP_MASK) {
1416 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1417 PMD_INIT_LOG(ERR, "VLAN stripping is "
1418 "always on in fm10k");
1421 if (mask & ETH_VLAN_EXTEND_MASK) {
1422 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1423 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1424 "supported in fm10k");
1427 if (mask & ETH_VLAN_FILTER_MASK) {
1428 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1429 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1433 /* Add/Remove a MAC address, and update filters to main VSI */
/* For non-VMDQ mode: program (or remove) 'mac' for every VLAN currently
 * set in the software VLAN table (vfta bitmap). 'pool' must be the main
 * VSI pool number; anything else is rejected with an error log. */
1434 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1435 const u8 *mac, bool add, uint32_t pool)
1437 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1438 struct fm10k_macvlan_filter_info *macvlan;
1441 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1443 if (pool != MAIN_VSI_POOL_NUMBER) {
1444 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1445 "mac to pool %u", pool);
/* Scan the vfta bitmap word by word (j) and bit by bit (k); each set
 * bit is one active VLAN, reconstructed as j*32 + k. */
1448 for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1449 if (!macvlan->vfta[j])
1451 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1452 if (!(macvlan->vfta[j] & (1 << k)))
/* Sanity check: bits found so far must not exceed the recorded count. */
1454 if (i + 1 > macvlan->vlan_num) {
1455 PMD_INIT_LOG(ERR, "vlan number not match");
1459 fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1460 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1461 fm10k_mbx_unlock(hw);
1467 /* Add/Remove a MAC address, and update filters to VMDQ */
/* For VMDQ mode: program (or remove) 'mac' on the given pool, once per
 * pool_map entry whose pool bitmask includes this pool. The dglort is
 * offset by the pool index. */
1468 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1469 const u8 *mac, bool add, uint32_t pool)
1471 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1472 struct fm10k_macvlan_filter_info *macvlan;
1473 struct rte_eth_vmdq_rx_conf *vmdq_conf;
1476 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1477 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1479 if (pool > macvlan->nb_queue_pools) {
1480 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1482 pool, macvlan->nb_queue_pools);
1485 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1486 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1489 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1490 vmdq_conf->pool_map[i].vlan_id, add, 0);
1491 fm10k_mbx_unlock(hw);
1495 /* Add/Remove a MAC address, and update filters */
/* Dispatcher: route the MAC add/remove to the VMDQ variant when queue
 * pools are configured, otherwise to the main-VSI variant. */
1496 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1497 const u8 *mac, bool add, uint32_t pool)
1499 struct fm10k_macvlan_filter_info *macvlan;
1501 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1503 if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1504 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1506 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1514 /* Add a MAC address, and update filters */
/* ethdev mac_addr_add callback: install the address in HW filters and
 * remember which VMDQ pool this slot ('index') belongs to. */
1516 fm10k_macaddr_add(struct rte_eth_dev *dev,
1517 struct ether_addr *mac_addr,
1521 struct fm10k_macvlan_filter_info *macvlan;
1523 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1524 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1525 macvlan->mac_vmdq_id[index] = pool;
1528 /* Remove a MAC address, and update filters */
/* ethdev mac_addr_remove callback: drop the address from HW filters
 * using the pool recorded at add time, then clear the slot's pool id. */
1530 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1532 struct rte_eth_dev_data *data = dev->data;
1533 struct fm10k_macvlan_filter_info *macvlan;
1535 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1536 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1537 FALSE, macvlan->mac_vmdq_id[index]);
1538 macvlan->mac_vmdq_id[index] = 0;
/* Validate a requested descriptor count: must lie in [min, max] and be
 * a multiple of 'mult'. Non-zero result on the elided line below
 * presumably means "invalid" — confirm against full source. */
1542 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1544 if ((request < min) || (request > max) || ((request % mult) != 0))
/* Validate a threshold: must lie in [min, max] and evenly divide 'div'
 * (note the direction: div % request == 0, i.e. request divides div). */
1552 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1554 if ((request < min) || (request > max) || ((div % request) != 0))
/* Apply an Rx queue configuration to 'q': pick the rx_free_thresh
 * (default when the app passes 0), validate it against the per-queue
 * min/max/divisor constraints, and copy drop/deferred-start settings. */
1561 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1563 uint16_t rx_free_thresh;
1565 if (conf->rx_free_thresh == 0)
1566 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1568 rx_free_thresh = conf->rx_free_thresh;
1570 /* make sure the requested threshold satisfies the constraints */
1571 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1572 FM10K_RX_FREE_THRESH_MAX(q),
1573 FM10K_RX_FREE_THRESH_DIV(q),
1575 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1576 "less than or equal to %u, "
1577 "greater than or equal to %u, "
1578 "and a divisor of %u",
1579 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1580 FM10K_RX_FREE_THRESH_MIN(q),
1581 FM10K_RX_FREE_THRESH_DIV(q));
/* Validated value becomes the queue's mbuf re-allocation threshold. */
1585 q->alloc_thresh = rx_free_thresh;
1586 q->drop_en = conf->rx_drop_en;
1587 q->rx_deferred_start = conf->rx_deferred_start;
1593 * Hardware requires specific alignment for Rx packet buffers. At
1594 * least one of the following two conditions must be satisfied.
1595 * 1. Address is 512B aligned
1596 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1598 * As such, the driver may need to adjust the DMA address within the
1599 * buffer by up to 512B.
1601 * return 1 if the element size is valid, otherwise return 0.
/* Check that the mempool's element size leaves enough data room after
 * subtracting the mbuf header, headroom, and up to 512B of DMA
 * alignment slack. The unsigned-underflow check (min_size > elt_size)
 * catches the case where the subtractions wrapped. */
1604 mempool_element_size_valid(struct rte_mempool *mp)
1608 /* elt_size includes mbuf header and headroom */
1609 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1610 RTE_PKTMBUF_HEADROOM;
1612 /* account for up to 512B of alignment */
1613 min_size -= FM10K_RX_DATABUF_ALIGN;
1615 /* sanity check for overflow */
1616 if (min_size > mp->elt_size)
/* ethdev rx_queue_setup callback: validate inputs, free any previous
 * queue at this index, then allocate the queue struct, software ring
 * and hardware descriptor ring (all NUMA-local to socket_id), and
 * record whether the queue qualifies for vector Rx.
 * NOTE(review): sampled listing — error-return statements and cleanup
 * lines between the visible lines are not shown here. */
1624 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1625 uint16_t nb_desc, unsigned int socket_id,
1626 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1628 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1629 struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
1630 struct fm10k_rx_queue *q;
1631 const struct rte_memzone *mz;
1633 PMD_INIT_FUNC_TRACE();
1635 /* make sure the mempool element size can account for alignment. */
1636 if (!mempool_element_size_valid(mp)) {
1637 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1641 /* make sure a valid number of descriptors have been requested */
1642 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1643 FM10K_MULT_RX_DESC, nb_desc)) {
1644 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1645 "less than or equal to %"PRIu32", "
1646 "greater than or equal to %u, "
1647 "and a multiple of %u",
1648 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1649 FM10K_MULT_RX_DESC);
1654 * if this queue existed already, free the associated memory. The
1655 * queue cannot be reused in case we need to allocate memory on
1656 * different socket than was previously used.
1658 if (dev->data->rx_queues[queue_id] != NULL) {
1659 rx_queue_free(dev->data->rx_queues[queue_id]);
1660 dev->data->rx_queues[queue_id] = NULL;
1663 /* allocate memory for the queue structure */
1664 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1667 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
/* Initialize queue bookkeeping; tail_ptr maps directly onto the HW
 * RDT (Rx descriptor tail) register for this queue. */
1673 q->nb_desc = nb_desc;
1674 q->nb_fake_desc = FM10K_MULT_RX_DESC;
1675 q->port_id = dev->data->port_id;
1676 q->queue_id = queue_id;
1677 q->tail_ptr = (volatile uint32_t *)
1678 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1679 if (handle_rxconf(q, conf))
1682 /* allocate memory for the software ring */
1683 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1684 (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1685 RTE_CACHE_LINE_SIZE, socket_id);
1686 if (q->sw_ring == NULL) {
1687 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1693 * allocate memory for the hardware descriptor ring. A memzone large
1694 * enough to hold the maximum ring size is requested to allow for
1695 * resizing in later calls to the queue setup function.
1697 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1698 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1701 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1702 rte_free(q->sw_ring);
1706 q->hw_ring = mz->addr;
1707 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1709 /* Check if number of descs satisfied Vector requirement */
1710 if (!rte_is_power_of_2(nb_desc)) {
/* One non-power-of-2 queue disables vector Rx for the whole port. */
1711 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1712 "preconditions - canceling the feature for "
1713 "the whole port[%d]",
1714 q->queue_id, q->port_id);
1715 dev_info->rx_vec_allowed = false;
1717 fm10k_rxq_vec_setup(q);
1719 dev->data->rx_queues[queue_id] = q;
/* ethdev rx_queue_release callback: free the queue and its resources. */
1724 fm10k_rx_queue_release(void *queue)
1726 PMD_INIT_FUNC_TRACE();
1728 rx_queue_free(queue);
/* Apply a Tx queue configuration to 'q': resolve and validate
 * tx_free_thresh and tx_rs_thresh (defaults when the app passes 0),
 * logging an error when either violates the min/max/divisor rules. */
1732 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1734 uint16_t tx_free_thresh;
1735 uint16_t tx_rs_thresh;
1737 /* constraint MACROs require that tx_free_thresh is configured
1738 * before tx_rs_thresh */
1739 if (conf->tx_free_thresh == 0)
1740 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1742 tx_free_thresh = conf->tx_free_thresh;
1744 /* make sure the requested threshold satisfies the constraints */
1745 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1746 FM10K_TX_FREE_THRESH_MAX(q),
1747 FM10K_TX_FREE_THRESH_DIV(q),
1749 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1750 "less than or equal to %u, "
1751 "greater than or equal to %u, "
1752 "and a divisor of %u",
1753 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1754 FM10K_TX_FREE_THRESH_MIN(q),
1755 FM10K_TX_FREE_THRESH_DIV(q));
/* free_thresh must be committed before the RS-threshold macros run
 * (see comment above). */
1759 q->free_thresh = tx_free_thresh;
1761 if (conf->tx_rs_thresh == 0)
1762 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1764 tx_rs_thresh = conf->tx_rs_thresh;
1766 q->tx_deferred_start = conf->tx_deferred_start;
1768 /* make sure the requested threshold satisfies the constraints */
1769 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1770 FM10K_TX_RS_THRESH_MAX(q),
1771 FM10K_TX_RS_THRESH_DIV(q),
1773 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1774 "less than or equal to %u, "
1775 "greater than or equal to %u, "
1776 "and a divisor of %u",
1777 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1778 FM10K_TX_RS_THRESH_MIN(q),
1779 FM10K_TX_RS_THRESH_DIV(q));
1783 q->rs_thresh = tx_rs_thresh;
/* ethdev tx_queue_setup callback: validate the descriptor count, free
 * any queue previously at this index, then allocate the queue struct,
 * software ring, hardware descriptor ring and RS-bit tracker list (all
 * NUMA-local to socket_id).
 * NOTE(review): sampled listing — error-return statements and some
 * cleanup lines between the visible lines are not shown here. */
1789 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1790 uint16_t nb_desc, unsigned int socket_id,
1791 const struct rte_eth_txconf *conf)
1793 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1794 struct fm10k_tx_queue *q;
1795 const struct rte_memzone *mz;
1797 PMD_INIT_FUNC_TRACE();
1799 /* make sure a valid number of descriptors have been requested */
1800 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1801 FM10K_MULT_TX_DESC, nb_desc)) {
1802 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1803 "less than or equal to %"PRIu32", "
1804 "greater than or equal to %u, "
1805 "and a multiple of %u",
1806 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1807 FM10K_MULT_TX_DESC);
1812 * if this queue existed already, free the associated memory. The
1813 * queue cannot be reused in case we need to allocate memory on
1814 * different socket than was previously used.
1816 if (dev->data->tx_queues[queue_id] != NULL) {
1817 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1820 dev->data->tx_queues[queue_id] = NULL;
1823 /* allocate memory for the queue structure */
1824 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1827 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
/* Initialize queue bookkeeping; tail_ptr maps directly onto the HW
 * TDT (Tx descriptor tail) register for this queue. */
1832 q->nb_desc = nb_desc;
1833 q->port_id = dev->data->port_id;
1834 q->queue_id = queue_id;
1835 q->txq_flags = conf->txq_flags;
1836 q->ops = &def_txq_ops;
1837 q->tail_ptr = (volatile uint32_t *)
1838 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1839 if (handle_txconf(q, conf))
1842 /* allocate memory for the software ring */
1843 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1844 nb_desc * sizeof(struct rte_mbuf *),
1845 RTE_CACHE_LINE_SIZE, socket_id);
1846 if (q->sw_ring == NULL) {
1847 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1853 * allocate memory for the hardware descriptor ring. A memzone large
1854 * enough to hold the maximum ring size is requested to allow for
1855 * resizing in later calls to the queue setup function.
1857 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
1858 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
1861 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1862 rte_free(q->sw_ring);
1866 q->hw_ring = mz->addr;
1867 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1870 * allocate memory for the RS bit tracker. Enough slots to hold the
1871 * descriptor index for each RS bit needing to be set are required.
1873 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1874 ((nb_desc + 1) / q->rs_thresh) *
1876 RTE_CACHE_LINE_SIZE, socket_id);
1877 if (q->rs_tracker.list == NULL) {
1878 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1879 rte_free(q->sw_ring);
1884 dev->data->tx_queues[queue_id] = q;
/* ethdev tx_queue_release callback: free the queue (the actual free
 * happens on a line elided from this listing). */
1889 fm10k_tx_queue_release(void *queue)
1891 struct fm10k_tx_queue *q = queue;
1892 PMD_INIT_FUNC_TRACE();
/* ethdev reta_update callback: write the RSS redirection table.
 * The 128-entry table lives in 32 RETA registers, 4 one-byte entries
 * per 32-bit register; only entries whose mask bit is set are changed. */
1898 fm10k_reta_update(struct rte_eth_dev *dev,
1899 struct rte_eth_rss_reta_entry64 *reta_conf,
1902 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1903 uint16_t i, j, idx, shift;
1907 PMD_INIT_FUNC_TRACE();
1909 if (reta_size > FM10K_MAX_RSS_INDICES) {
1910 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1911 "(%d) doesn't match the number hardware can supported "
1912 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1917 * Update Redirection Table RETA[n], n=0..31. The redirection table has
1918 * 128-entries in 32 registers
1920 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1921 idx = i / RTE_RETA_GROUP_SIZE;
1922 shift = i % RTE_RETA_GROUP_SIZE;
1923 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1924 BIT_MASK_PER_UINT32);
/* Partial update of a register: read-modify-write; a full 4-entry
 * mask can skip the read. */
1929 if (mask != BIT_MASK_PER_UINT32)
1930 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1932 for (j = 0; j < CHARS_PER_UINT32; j++) {
1933 if (mask & (0x1 << j)) {
1935 reta &= ~(UINT8_MAX << CHAR_BIT * j);
1936 reta |= reta_conf[idx].reta[shift + j] <<
1940 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
/* ethdev reta_query callback: read the RSS redirection table back into
 * reta_conf. Caller must supply at least FM10K_MAX_RSS_INDICES entries
 * (note the check is '<' here, vs '>' in the update path). */
1947 fm10k_reta_query(struct rte_eth_dev *dev,
1948 struct rte_eth_rss_reta_entry64 *reta_conf,
1951 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1952 uint16_t i, j, idx, shift;
1956 PMD_INIT_FUNC_TRACE();
1958 if (reta_size < FM10K_MAX_RSS_INDICES) {
1959 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1960 "(%d) doesn't match the number hardware can supported "
1961 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1966 * Read Redirection Table RETA[n], n=0..31. The redirection table has
1967 * 128-entries in 32 registers
1969 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1970 idx = i / RTE_RETA_GROUP_SIZE;
1971 shift = i % RTE_RETA_GROUP_SIZE;
1972 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1973 BIT_MASK_PER_UINT32);
/* Extract each selected one-byte entry from the 32-bit register. */
1977 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1978 for (j = 0; j < CHARS_PER_UINT32; j++) {
1979 if (mask & (0x1 << j))
1980 reta_conf[idx].reta[shift + j] = ((reta >>
1981 CHAR_BIT * j) & UINT8_MAX);
/* ethdev rss_hash_update callback: program the RSS key (RSSRK
 * registers) and translate the requested rte hash-function flags into
 * the hardware MRQC bits. Key must be at least FM10K_RSSRK_SIZE *
 * FM10K_RSSRK_ENTRIES_PER_REG bytes. */
1989 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1990 struct rte_eth_rss_conf *rss_conf)
1992 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1993 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1995 uint64_t hf = rss_conf->rss_hf;
1998 PMD_INIT_FUNC_TRACE();
2000 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2001 FM10K_RSSRK_ENTRIES_PER_REG)
/* Map rte RSS flags to MRQC bits; several rte flags collapse onto the
 * same HW bit (e.g. IPV6 and IPV6_EX both set FM10K_MRQC_IPV6). */
2008 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
2009 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
2010 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
2011 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
2012 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
2013 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
2014 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
2015 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
2016 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
2018 /* If the mapping doesn't fit any supported, return */
2023 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2024 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2026 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
/* ethdev rss_hash_conf_get callback: read back the RSS key from the
 * RSSRK registers and translate the MRQC bits into rte hash-function
 * flags (inverse of fm10k_rss_hash_update). */
2032 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2033 struct rte_eth_rss_conf *rss_conf)
2035 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2036 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2041 PMD_INIT_FUNC_TRACE();
2043 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2044 FM10K_RSSRK_ENTRIES_PER_REG)
2048 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2049 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2051 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
/* One HW bit can imply multiple rte flags (IPV6 covers IPV6_EX etc.),
 * mirroring the many-to-one mapping in the update path. */
2053 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
2054 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
2055 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
2056 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
2057 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
2058 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
2059 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
2060 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
2061 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
2063 rss_conf->rss_hf = hf;
/* Enable PF miscellaneous (non-queue) interrupts: map all local causes
 * to vector 0, unmask the misc causes in EIMR, and clear the ITR mask
 * with auto-mask enabled. */
2069 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2071 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2072 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2074 /* Bind all local non-queue interrupt to vector 0 */
2077 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2078 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2079 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2080 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2081 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2082 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2084 /* Enable misc causes */
2085 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2086 FM10K_EIMR_ENABLE(THI_FAULT) |
2087 FM10K_EIMR_ENABLE(FUM_FAULT) |
2088 FM10K_EIMR_ENABLE(MAILBOX) |
2089 FM10K_EIMR_ENABLE(SWITCHREADY) |
2090 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2091 FM10K_EIMR_ENABLE(SRAMERROR) |
2092 FM10K_EIMR_ENABLE(VFLR));
2095 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2096 FM10K_ITR_MASK_CLEAR);
2097 FM10K_WRITE_FLUSH(hw);
/* Disable PF miscellaneous interrupts: mirror image of the enable path
 * — unmap causes, mask the misc causes in EIMR, set the ITR mask. */
2101 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2103 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2104 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2108 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2109 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2110 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2111 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2112 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2113 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2115 /* Disable misc causes */
2116 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2117 FM10K_EIMR_DISABLE(THI_FAULT) |
2118 FM10K_EIMR_DISABLE(FUM_FAULT) |
2119 FM10K_EIMR_DISABLE(MAILBOX) |
2120 FM10K_EIMR_DISABLE(SWITCHREADY) |
2121 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2122 FM10K_EIMR_DISABLE(SRAMERROR) |
2123 FM10K_EIMR_DISABLE(VFLR));
2126 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2127 FM10K_WRITE_FLUSH(hw);
/* Enable VF interrupts: a VF has only interrupt 0 available, so map it
 * and clear the VF ITR mask with auto-mask enabled. */
2131 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2133 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2134 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2136 /* Bind all local non-queue interrupt to vector 0 */
2139 /* Only INT 0 available, other 15 are reserved. */
2140 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2143 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2144 FM10K_ITR_MASK_CLEAR);
2145 FM10K_WRITE_FLUSH(hw);
/* Disable VF interrupts: unmap interrupt 0 and set the VF ITR mask. */
2149 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2151 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2152 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2156 /* Only INT 0 available, other 15 are reserved. */
2157 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2160 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2161 FM10K_WRITE_FLUSH(hw);
/* Decode and log PCA/THI/FUM fault interrupts indicated in 'eicr'.
 * For each fault class present, fetch the fault record via
 * fm10k_get_fault() and log its type name, originator (VF/PF), address
 * and spec info. Error paths (the 'err' checks and the label logging
 * "Failed to handle fault event") are on lines elided from this
 * sampled listing. */
2165 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2167 struct fm10k_fault fault;
2169 const char *estr = "Unknown error";
2171 /* Process PCA fault */
2172 if (eicr & FM10K_EICR_PCA_FAULT) {
2173 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2176 switch (fault.type) {
2178 estr = "PCA_NO_FAULT"; break;
2179 case PCA_UNMAPPED_ADDR:
2180 estr = "PCA_UNMAPPED_ADDR"; break;
2181 case PCA_BAD_QACCESS_PF:
2182 estr = "PCA_BAD_QACCESS_PF"; break;
2183 case PCA_BAD_QACCESS_VF:
2184 estr = "PCA_BAD_QACCESS_VF"; break;
2185 case PCA_MALICIOUS_REQ:
2186 estr = "PCA_MALICIOUS_REQ"; break;
2187 case PCA_POISONED_TLP:
2188 estr = "PCA_POISONED_TLP"; break;
2190 estr = "PCA_TLP_ABORT"; break;
2194 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2195 estr, fault.func ? "VF" : "PF", fault.func,
2196 fault.address, fault.specinfo);
2199 /* Process THI fault */
2200 if (eicr & FM10K_EICR_THI_FAULT) {
2201 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2204 switch (fault.type) {
2206 estr = "THI_NO_FAULT"; break;
2207 case THI_MAL_DIS_Q_FAULT:
2208 estr = "THI_MAL_DIS_Q_FAULT"; break;
2212 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2213 estr, fault.func ? "VF" : "PF", fault.func,
2214 fault.address, fault.specinfo);
2217 /* Process FUM fault */
2218 if (eicr & FM10K_EICR_FUM_FAULT) {
2219 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2222 switch (fault.type) {
2224 estr = "FUM_NO_FAULT"; break;
2225 case FUM_UNMAPPED_ADDR:
2226 estr = "FUM_UNMAPPED_ADDR"; break;
2227 case FUM_POISONED_TLP:
2228 estr = "FUM_POISONED_TLP"; break;
2229 case FUM_BAD_VF_QACCESS:
2230 estr = "FUM_BAD_VF_QACCESS"; break;
2231 case FUM_ADD_DECODE_ERR:
2232 estr = "FUM_ADD_DECODE_ERR"; break;
2234 estr = "FUM_RO_ERROR"; break;
2235 case FUM_QPRC_CRC_ERROR:
2236 estr = "FUM_QPRC_CRC_ERROR"; break;
2237 case FUM_CSR_TIMEOUT:
2238 estr = "FUM_CSR_TIMEOUT"; break;
2239 case FUM_INVALID_TYPE:
2240 estr = "FUM_INVALID_TYPE"; break;
2241 case FUM_INVALID_LENGTH:
2242 estr = "FUM_INVALID_LENGTH"; break;
2243 case FUM_INVALID_BE:
2244 estr = "FUM_INVALID_BE"; break;
2245 case FUM_INVALID_ALIGN:
2246 estr = "FUM_INVALID_ALIGN"; break;
2250 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2251 estr, fault.func ? "VF" : "PF", fault.func,
2252 fault.address, fault.specinfo);
2257 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2262 * PF interrupt handler triggered by NIC for handling specific interrupt.
2265 * Pointer to interrupt handle.
2267 * The address of parameter (struct rte_eth_dev *) registered before.
/* PF interrupt service routine. Reads EICR once, then dispatches:
 * PCIe faults -> fm10k_dev_handle_fault(), switch up/down -> log,
 * mailbox -> process under the mailbox lock, SRAM error -> clear
 * pending bits. Finally re-arms the interrupt on both the device
 * (ITR) and host (rte_intr_enable) sides. */
2273 fm10k_dev_interrupt_handler_pf(
2274 __rte_unused struct rte_intr_handle *handle,
2277 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2278 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2279 uint32_t cause, status;
2281 if (hw->mac.type != fm10k_mac_pf)
2284 cause = FM10K_READ_REG(hw, FM10K_EICR);
2286 /* Handle PCI fault cases */
2287 if (cause & FM10K_EICR_FAULT_MASK) {
2288 PMD_INIT_LOG(ERR, "INT: find fault!");
2289 fm10k_dev_handle_fault(hw, cause);
2292 /* Handle switch up/down */
2293 if (cause & FM10K_EICR_SWITCHNOTREADY)
2294 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2296 if (cause & FM10K_EICR_SWITCHREADY)
2297 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2299 /* Handle mailbox message */
/* Mailbox lock is taken on the elided line before this call. */
2301 hw->mbx.ops.process(hw, &hw->mbx);
2302 fm10k_mbx_unlock(hw);
2304 /* Handle SRAM error */
2305 if (cause & FM10K_EICR_SRAMERROR) {
2306 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2308 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2309 /* Write to clear pending bits */
2310 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2312 /* Todo: print out error message after shared code updates */
2315 /* Clear these 3 events if having any */
2316 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2317 FM10K_EICR_SWITCHREADY;
2319 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2321 /* Re-enable interrupt from device side */
2322 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2323 FM10K_ITR_MASK_CLEAR);
2324 /* Re-enable interrupt from host side */
2325 rte_intr_enable(&(dev->pci_dev->intr_handle));
2329 * VF interrupt handler triggered by NIC for handling specific interrupt.
2332 * Pointer to interrupt handle.
2334 * The address of parameter (struct rte_eth_dev *) registered before.
/* VF interrupt service routine: process mailbox messages under the
 * mailbox lock, then re-arm the interrupt on both the device (VFITR)
 * and host (rte_intr_enable) sides. */
2340 fm10k_dev_interrupt_handler_vf(
2341 __rte_unused struct rte_intr_handle *handle,
2344 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2345 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2347 if (hw->mac.type != fm10k_mac_vf)
2350 /* Handle mailbox message if lock is acquired */
2352 hw->mbx.ops.process(hw, &hw->mbx);
2353 fm10k_mbx_unlock(hw);
2355 /* Re-enable interrupt from device side */
2356 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2357 FM10K_ITR_MASK_CLEAR);
2358 /* Re-enable interrupt from host side */
2359 rte_intr_enable(&(dev->pci_dev->intr_handle));
2362 /* Mailbox message handler in VF */
/* Dispatch table of mailbox message handlers registered on a VF:
 * TLV test, MAC/VLAN updates, logical-port state, and TLV errors. */
2363 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2364 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2365 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2366 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2367 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2370 /* Mailbox message handler in PF */
/* Dispatch table of mailbox message handlers registered on a PF:
 * error responses for xcast/MAC/lport requests, lport-map updates,
 * PVID updates, and TLV errors. */
2371 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
2372 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
2373 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
2374 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
2375 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
2376 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
2377 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
2378 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
/* Initialize the mailbox: create its lock, register the PF or VF
 * message-handler table, then connect (PF -> switch manager,
 * VF -> PF). Returns the connect result, or the registration error. */
2382 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2386 /* Initialize mailbox lock */
2387 fm10k_mbx_initlock(hw);
2389 /* Replace default message handler with new ones */
2390 if (hw->mac.type == fm10k_mac_pf)
2391 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
2393 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2396 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
2400 /* Connect to SM for PF device or PF for VF device */
2401 return hw->mbx.ops.connect(hw, &hw->mbx);
/* Tear down the mailbox connection established by
 * fm10k_setup_mbx_service(). */
2405 fm10k_close_mbx_service(struct fm10k_hw *hw)
2407 /* Disconnect from SM for PF device or PF for VF device */
2408 hw->mbx.ops.disconnect(hw, &hw->mbx);
/* ethdev callback table for the fm10k PMD. Note xstats_reset reuses
 * fm10k_stats_reset since both clear the same hw_stats structure. */
2411 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2412 .dev_configure = fm10k_dev_configure,
2413 .dev_start = fm10k_dev_start,
2414 .dev_stop = fm10k_dev_stop,
2415 .dev_close = fm10k_dev_close,
2416 .promiscuous_enable = fm10k_dev_promiscuous_enable,
2417 .promiscuous_disable = fm10k_dev_promiscuous_disable,
2418 .allmulticast_enable = fm10k_dev_allmulticast_enable,
2419 .allmulticast_disable = fm10k_dev_allmulticast_disable,
2420 .stats_get = fm10k_stats_get,
2421 .xstats_get = fm10k_xstats_get,
2422 .stats_reset = fm10k_stats_reset,
2423 .xstats_reset = fm10k_stats_reset,
2424 .link_update = fm10k_link_update,
2425 .dev_infos_get = fm10k_dev_infos_get,
2426 .vlan_filter_set = fm10k_vlan_filter_set,
2427 .vlan_offload_set = fm10k_vlan_offload_set,
2428 .mac_addr_add = fm10k_macaddr_add,
2429 .mac_addr_remove = fm10k_macaddr_remove,
2430 .rx_queue_start = fm10k_dev_rx_queue_start,
2431 .rx_queue_stop = fm10k_dev_rx_queue_stop,
2432 .tx_queue_start = fm10k_dev_tx_queue_start,
2433 .tx_queue_stop = fm10k_dev_tx_queue_stop,
2434 .rx_queue_setup = fm10k_rx_queue_setup,
2435 .rx_queue_release = fm10k_rx_queue_release,
2436 .tx_queue_setup = fm10k_tx_queue_setup,
2437 .tx_queue_release = fm10k_tx_queue_release,
2438 .reta_update = fm10k_reta_update,
2439 .reta_query = fm10k_reta_query,
2440 .rss_hash_update = fm10k_rss_hash_update,
2441 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
/* Select the Tx burst function for this device. Marked cold: runs at
 * configure/start time, never in the datapath. If every queue passes the
 * vector-Tx precondition check, install the SSE vector burst routine
 * (after per-queue vector setup); otherwise fall back to the scalar one.
 * NOTE(review): the lines between the condition check and the setup loop
 * are elided in this view — confirm the fallback path against full source.
 */
2444 static void __attribute__((cold))
2445 fm10k_set_tx_function(struct rte_eth_dev *dev)
2447 struct fm10k_tx_queue *txq;
2451 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2452 txq = dev->data->tx_queues[i];
2453 /* Check if Vector Tx is satisfied */
2454 if (fm10k_tx_vec_condition_check(txq)) {
/* Vector path: let each queue prepare its vector-specific state
 * before publishing the vector burst function.
 */
2461 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2462 txq = dev->data->tx_queues[i];
2463 fm10k_txq_vec_setup(txq);
2465 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
/* Scalar fallback (enclosing else elided in this view). */
2467 dev->tx_pkt_burst = fm10k_xmit_pkts;
/* Select the Rx burst function for this device. Marked cold: runs at
 * configure/start time only. Vector (SSE) Rx is used when the condition
 * check passes and rx_vec_allowed is set, choosing the scattered variant
 * when multi-segment Rx is enabled; otherwise the scalar equivalents.
 * The final choice is mirrored into every Rx queue via rx_using_sse.
 */
2470 static void __attribute__((cold))
2471 fm10k_set_rx_function(struct rte_eth_dev *dev)
2473 struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2474 uint16_t i, rx_using_sse;
2476 /* In order to allow Vector Rx there are a few configuration
2477 * conditions to be met.
2479 if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) {
2480 if (dev->data->scattered_rx)
2481 dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2483 dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2484 } else if (dev->data->scattered_rx)
2485 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
/* Derive the flag from the burst function actually installed. */
2488 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2489 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
2491 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2492 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2494 rxq->rx_using_sse = rx_using_sse;
/*
 * Initialize static device parameters: hard-coded PCIe bus capabilities
 * (see @todo below — real bus info is not readable yet) and the default
 * permission for vector Rx.
 */
2499 fm10k_params_init(struct rte_eth_dev *dev)
2501 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2502 struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2504 /* Inialize bus info. Normally we would call fm10k_get_bus_info(), but
2505 * there is no way to get link status without reading BAR4. Until this
2506 * works, assume we have maximum bandwidth.
2507 * @todo - fix bus info
2509 hw->bus_caps.speed = fm10k_bus_speed_8000;
2510 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2511 hw->bus_caps.payload = fm10k_bus_payload_512;
2512 hw->bus.speed = fm10k_bus_speed_8000;
2513 hw->bus.width = fm10k_bus_width_pcie_x8;
/* Current payload (256) is intentionally below the 512 capability. */
2514 hw->bus.payload = fm10k_bus_payload_256;
/* Vector Rx allowed by default; may be cleared later at configure time. */
2516 info->rx_vec_allowed = true;
/*
 * Per-device init hook invoked by the ethdev layer at probe time.
 * Sets up ops and default burst functions, initializes shared code and
 * hardware, allocates/derives the MAC address, connects the mailbox,
 * registers PF- or VF-specific interrupt handling, waits for the switch
 * manager (PF only), and finally enables the logical port.
 * NOTE(review): several error-return lines are elided in this view.
 */
2520 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2522 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2524 struct fm10k_macvlan_filter_info *macvlan;
2526 PMD_INIT_FUNC_TRACE();
/* Ops/burst pointers are set for all processes (secondary included). */
2528 dev->dev_ops = &fm10k_eth_dev_ops;
2529 dev->rx_pkt_burst = &fm10k_recv_pkts;
2530 dev->tx_pkt_burst = &fm10k_xmit_pkts;
2532 /* only initialize in the primary process */
2533 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2536 rte_eth_copy_pci_info(dev, dev->pci_dev);
2538 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2539 memset(macvlan, 0, sizeof(*macvlan));
2540 /* Vendor and Device ID need to be set before init of shared code */
2541 memset(hw, 0, sizeof(*hw));
2542 hw->device_id = dev->pci_dev->id.device_id;
2543 hw->vendor_id = dev->pci_dev->id.vendor_id;
2544 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2545 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2546 hw->revision_id = 0;
/* BAR0 holds the register space; a NULL mapping means the device was
 * bound but its resources were not mapped (e.g. claimed by another PMD).
 */
2547 hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2548 if (hw->hw_addr == NULL) {
2549 PMD_INIT_LOG(ERR, "Bad mem resource."
2550 " Try to blacklist unused devices.");
2554 /* Store fm10k_adapter pointer */
2555 hw->back = dev->data->dev_private;
2557 /* Initialize the shared code */
2558 diag = fm10k_init_shared_code(hw);
2559 if (diag != FM10K_SUCCESS) {
2560 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2564 /* Initialize parameters */
2565 fm10k_params_init(dev);
2567 /* Initialize the hw */
2568 diag = fm10k_init_hw(hw);
2569 if (diag != FM10K_SUCCESS) {
2570 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2574 /* Initialize MAC address(es) */
2575 dev->data->mac_addrs = rte_zmalloc("fm10k",
2576 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2577 if (dev->data->mac_addrs == NULL) {
2578 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2582 diag = fm10k_read_mac_addr(hw);
2584 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2585 &dev->data->mac_addrs[0]);
/* Fall back to a random locally-administered MAC when the read failed
 * or yielded an invalid/unassigned address (common for fresh VFs).
 */
2587 if (diag != FM10K_SUCCESS ||
2588 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2590 /* Generate a random addr */
2591 eth_random_addr(hw->mac.addr);
/* NOTE(review): ETH_ALEN used here vs ETHER_ADDR_LEN above — same
 * value, but confirm and unify the constant in a follow-up.
 */
2592 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
2593 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2594 &dev->data->mac_addrs[0]);
2597 /* Reset the hw statistics */
2598 fm10k_stats_reset(dev);
2601 diag = fm10k_reset_hw(hw);
2602 if (diag != FM10K_SUCCESS) {
2603 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2607 /* Setup mailbox service */
2608 diag = fm10k_setup_mbx_service(hw);
2609 if (diag != FM10K_SUCCESS) {
2610 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2614 /*PF/VF has different interrupt handling mechanism */
2615 if (hw->mac.type == fm10k_mac_pf) {
2616 /* register callback func to eal lib */
2617 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2618 fm10k_dev_interrupt_handler_pf, (void *)dev);
2620 /* enable MISC interrupt */
2621 fm10k_dev_enable_intr_pf(dev);
2623 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2624 fm10k_dev_interrupt_handler_vf, (void *)dev);
2626 fm10k_dev_enable_intr_vf(dev);
2629 /* Enable uio intr after callback registered */
2630 rte_intr_enable(&(dev->pci_dev->intr_handle));
2632 hw->mac.ops.update_int_moderator(hw);
2634 /* Make sure Switch Manager is ready before going forward. */
2635 if (hw->mac.type == fm10k_mac_pf) {
2636 int switch_ready = 0;
/* Poll the switch state under the mailbox lock, up to
 * MAX_QUERY_SWITCH_STATE_TIMES attempts (lock-take line elided here).
 */
2639 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2641 hw->mac.ops.get_host_state(hw, &switch_ready);
2642 fm10k_mbx_unlock(hw);
2645 /* Delay some time to acquire async LPORT_MAP info. */
2646 rte_delay_us(WAIT_SWITCH_MSG_US);
2649 if (switch_ready == 0) {
2650 PMD_INIT_LOG(ERR, "switch is not ready");
2656 * Below function will trigger operations on mailbox, acquire lock to
2657 * avoid race condition from interrupt handler. Operations on mailbox
2658 * FIFO will trigger interrupt to PF/SM, in which interrupt handler
2659 * will handle and generate an interrupt to our side. Then, FIFO in
2660 * mailbox will be touched.
2663 /* Enable port first */
2664 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2666 /* Set unicast mode by default. App can change to other mode in other
2669 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2670 FM10K_XCAST_MODE_NONE);
2672 fm10k_mbx_unlock(hw);
2674 /* Add default mac address */
2675 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2676 MAIN_VSI_POOL_NUMBER);
/*
 * Per-device uninit hook: mirror of eth_fm10k_dev_init(). Closes the
 * device, clears the ops/burst pointers, disables and unregisters the
 * role-specific interrupt handling, frees the MAC address array, and
 * scrubs the hw struct. Primary process only.
 */
2682 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
2684 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2686 PMD_INIT_FUNC_TRACE();
2688 /* only uninitialize in the primary process */
2689 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2692 /* safe to close dev here */
2693 fm10k_dev_close(dev);
2695 dev->dev_ops = NULL;
2696 dev->rx_pkt_burst = NULL;
2697 dev->tx_pkt_burst = NULL;
2699 /* disable uio/vfio intr */
2700 rte_intr_disable(&(dev->pci_dev->intr_handle));
2702 /*PF/VF has different interrupt handling mechanism */
2703 if (hw->mac.type == fm10k_mac_pf) {
2704 /* disable interrupt */
2705 fm10k_dev_disable_intr_pf(dev);
2707 /* unregister callback func to eal lib */
2708 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2709 fm10k_dev_interrupt_handler_pf, (void *)dev);
2711 /* disable interrupt */
2712 fm10k_dev_disable_intr_vf(dev);
2714 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2715 fm10k_dev_interrupt_handler_vf, (void *)dev);
2718 /* free mac memory */
2719 if (dev->data->mac_addrs) {
2720 rte_free(dev->data->mac_addrs);
/* Null out so ethdev teardown does not double-free. */
2721 dev->data->mac_addrs = NULL;
/* Scrub hw state so a re-init starts from a clean slate. */
2724 memset(hw, 0, sizeof(*hw));
2730 * The set of PCI devices this driver supports. This driver will enable both PF
2731 * and SRIOV-VF devices.
2733 static const struct rte_pci_id pci_id_fm10k_map[] = {
/* Expand each PF/VF device-ID declaration from the shared PCI ID list
 * into an rte_pci_id initializer; terminate with a zeroed sentinel.
 */
2734 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2735 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2736 #include "rte_pci_dev_ids.h"
2737 { .vendor_id = 0, /* sentinel */ },
/* Ethdev driver descriptor: ties the PCI ID table to the per-device
 * init/uninit hooks and the private-data size the ethdev layer must
 * allocate for each matched device.
 */
2740 static struct eth_driver rte_pmd_fm10k = {
2742 .name = "rte_pmd_fm10k",
2743 .id_table = pci_id_fm10k_map,
/* NEED_MAPPING: EAL must map BARs; DETACHABLE: hot-unplug supported. */
2744 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
2746 .eth_dev_init = eth_fm10k_dev_init,
2747 .eth_dev_uninit = eth_fm10k_dev_uninit,
2748 .dev_private_size = sizeof(struct fm10k_adapter),
2752 * Driver initialization routine.
2753 * Invoked once at EAL init time.
2754 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
/* name/params come from the generic driver-init signature; unused here. */
2757 rte_pmd_fm10k_init(__rte_unused const char *name,
2758 __rte_unused const char *params)
2760 PMD_INIT_FUNC_TRACE();
2761 rte_eth_driver_register(&rte_pmd_fm10k);
/* EAL driver record; PMD_REGISTER_DRIVER hooks it in via a constructor
 * so .init runs automatically during rte_eal_init().
 */
2765 static struct rte_driver rte_fm10k_driver = {
2767 .init = rte_pmd_fm10k_init,
2770 PMD_REGISTER_DRIVER(rte_fm10k_driver);