/*-
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

/* Pool used on the PF main VSI (i.e. when VMDQ is not enabled) */
#define MAIN_VSI_POOL_NUMBER 0

/* Maximum number of attempts to query switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval between switch status queries */
#define WAIT_SWITCH_MSG_US 100000
/* Number of chars (bytes) per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)

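/*
 * Illustrative note (not part of the driver): CHARS_PER_UINT32 evaluates to
 * 4 and BIT_MASK_PER_UINT32 to 0xF, i.e. one mask bit per byte lane of a
 * 32-bit RETA register. A minimal sketch of how the pair is used when
 * validating a 4-entry update mask:
 *
 *     uint8_t mask = 0xF;                 // all four entries selected
 *     if (mask != BIT_MASK_PER_UINT32)    // partial update: read-modify-write
 *         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
 */
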
/* First 64 logical ports for PF/VMDQ, second 64 for Flow Director */
#define MAX_LPORT_NUM 128
#define GLORT_FD_Q_BASE 0x40
#define GLORT_PF_MASK 0xFFC0
#define GLORT_FD_MASK GLORT_PF_MASK
#define GLORT_FD_INDEX GLORT_FD_Q_BASE

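/*
 * Illustrative note (not part of the driver): with GLORT_PF_MASK = 0xFFC0
 * the low 6 bits of a glort are left free, so one DGLORT map entry covers a
 * 64-entry block. Assuming hw->mac.dglort_map is the PF base, the two
 * blocks used here are:
 *
 *     PF/VMDQ glorts:        dglort_map + 0x00 .. dglort_map + 0x3F
 *     Flow Director glorts:  dglort_map + 0x40 .. dglort_map + 0x7F
 */
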
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
    const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);

struct fm10k_xstats_name_off {
    char name[RTE_ETH_XSTATS_NAME_SIZE];
    uint64_t offset;
};

static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
    {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
    {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
    {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
    {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
    {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
    {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
    {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
    {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
        nodesc_drop)},
};

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
        sizeof(fm10k_hw_stats_strings[0]))

static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
    {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
    {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
    {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
        sizeof(fm10k_hw_stats_rx_q_strings[0]))

static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
    {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
    {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
        sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
        (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))

static inline void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
    rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static inline void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
    while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
        rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static inline void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
    rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

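/*
 * Illustrative usage (not part of the driver): every mailbox transaction
 * below follows the same lock/call/unlock pattern, e.g.:
 *
 *     fm10k_mbx_lock(hw);
 *     hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
 *             FM10K_XCAST_MODE_NONE);
 *     fm10k_mbx_unlock(hw);
 *
 * The trylock/delay loop above trades pure busy-waiting for a bounded
 * FM10K_MBXLOCK_DELAY_US back-off between attempts.
 */
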
/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
    return -1;
}

uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
    __rte_unused void *rx_queue,
    __rte_unused struct rte_mbuf **rx_pkts,
    __rte_unused uint16_t nb_pkts)
{
    return 0;
}

uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
    __rte_unused void *rx_queue,
    __rte_unused struct rte_mbuf **rx_pkts,
    __rte_unused uint16_t nb_pkts)
{
    return 0;
}

int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
    return -1;
}

void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
    __rte_unused struct fm10k_rx_queue *rxq)
{
    return;
}

void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
    return;
}

int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
    return -1;
}

uint16_t __attribute__((weak))
fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
    __rte_unused struct rte_mbuf **tx_pkts,
    __rte_unused uint16_t nb_pkts)
{
    return 0;
}

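/*
 * Illustrative note (not part of the driver): because the stubs above are
 * declared __attribute__((weak)), a strong definition compiled into the
 * vPMD object (e.g. a fm10k_rxtx_vec.c) silently replaces them at link
 * time:
 *
 *     // strong definition wins over the weak stub
 *     uint16_t
 *     fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 *             uint16_t nb_pkts)
 *     { ... real vector implementation ... }
 *
 * When vPMD is disabled, the weak stubs keep the link intact and the
 * condition-check stubs return -1 so the scalar paths are chosen.
 */
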
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
    static const union fm10k_rx_desc zero = {{0}};
    uint64_t dma_addr;
    int i, diag;

    PMD_INIT_FUNC_TRACE();

    diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
    if (diag != 0)
        return -ENOMEM;

    for (i = 0; i < q->nb_desc; ++i) {
        fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
        if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
            rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                    q->nb_desc);
            return -EINVAL;
        }
        dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
        q->hw_ring[i].q.pkt_addr = dma_addr;
        q->hw_ring[i].q.hdr_addr = dma_addr;
    }

    /* initialize extra software ring entries. Space for these extra
     * entries is always allocated.
     */
    memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
    for (i = 0; i < q->nb_fake_desc; ++i) {
        q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
        q->hw_ring[q->nb_desc + i] = zero;
    }

    q->next_trigger = q->alloc_thresh - 1;
    FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
    q->rxrearm_start = 0;

    return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
    union fm10k_rx_desc zero = {.q = {0, 0, 0, 0}};
    unsigned i;

    PMD_INIT_FUNC_TRACE();

    /* zero descriptor rings */
    for (i = 0; i < q->nb_desc; ++i)
        q->hw_ring[i] = zero;

    /* zero faked descriptors */
    for (i = 0; i < q->nb_fake_desc; ++i)
        q->hw_ring[q->nb_desc + i] = zero;

    /* vPMD driver has a different way of releasing mbufs. */
    if (q->rx_using_sse) {
        fm10k_rx_queue_release_mbufs_vec(q);
        return;
    }

    /* free software buffers */
    for (i = 0; i < q->nb_desc; ++i) {
        if (q->sw_ring[i]) {
            rte_pktmbuf_free_seg(q->sw_ring[i]);
            q->sw_ring[i] = NULL;
        }
    }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
    PMD_INIT_FUNC_TRACE();
    if (q) {
        PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
        rx_queue_clean(q);
        if (q->sw_ring) {
            rte_free(q->sw_ring);
            q->sw_ring = NULL;
        }
        rte_free(q);
    }
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
    uint32_t reg, i;

    reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
    FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
            reg & ~FM10K_RXQCTL_ENABLE);

    /* Wait 100us at most */
    for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
        rte_delay_us(1);
        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        if (!(reg & FM10K_RXQCTL_ENABLE))
            break;
    }

    if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
        return -1;

    return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
    PMD_INIT_FUNC_TRACE();
    q->last_free = 0;
    q->next_free = 0;
    q->nb_used = 0;
    q->nb_free = q->nb_desc - 1;
    fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
    FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
    struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
    unsigned i;

    PMD_INIT_FUNC_TRACE();

    /* zero descriptor rings */
    for (i = 0; i < q->nb_desc; ++i)
        q->hw_ring[i] = zero;

    /* free software buffers */
    for (i = 0; i < q->nb_desc; ++i) {
        if (q->sw_ring[i]) {
            rte_pktmbuf_free_seg(q->sw_ring[i]);
            q->sw_ring[i] = NULL;
        }
    }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
    PMD_INIT_FUNC_TRACE();
    if (q) {
        PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
        tx_queue_clean(q);
        if (q->rs_tracker.list) {
            rte_free(q->rs_tracker.list);
            q->rs_tracker.list = NULL;
        }
        if (q->sw_ring) {
            rte_free(q->sw_ring);
            q->sw_ring = NULL;
        }
        rte_free(q);
    }
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
    uint32_t reg, i;

    reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
    FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
            reg & ~FM10K_TXDCTL_ENABLE);

    /* Wait 100us at most */
    for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
        rte_delay_us(1);
        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        if (!(reg & FM10K_TXDCTL_ENABLE))
            break;
    }

    if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
        return -1;

    return 0;
}

static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
    enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct rte_eth_vmdq_rx_conf *vmdq_conf;
    uint16_t nb_rx_q = dev->data->nb_rx_queues;

    vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

    if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
        PMD_INIT_LOG(ERR, "DCB mode is not supported.");
        return -EINVAL;
    }

    if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
        return 0;

    if (hw->mac.type == fm10k_mac_vf) {
        PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
        return -EINVAL;
    }

    /* Check VMDQ queue pool number */
    if (vmdq_conf->nb_queue_pools >
            sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
            vmdq_conf->nb_queue_pools > nb_rx_q) {
        PMD_INIT_LOG(ERR, "Too many queue pools: %d",
            vmdq_conf->nb_queue_pools);
        return -EINVAL;
    }

    return 0;
}

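/*
 * Illustrative configuration (not part of the driver): a VMDQ setup that
 * passes the checks above could look like this on the application side,
 * assuming 4 pools over 4 Rx queues:
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
 *     conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_4_POOLS;
 *     conf.rx_adv_conf.vmdq_rx_conf.nb_pool_maps = 1;
 *     conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].vlan_id = 100;
 *     conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].pools = 0xF; // pools 0-3
 */
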
static const struct fm10k_txq_ops def_txq_ops = {
    .reset = tx_queue_reset,
};

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
    int ret;

    PMD_INIT_FUNC_TRACE();

    if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
        PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
    /* multiple queue mode checking */
    ret = fm10k_check_mq_mode(dev);
    if (ret != 0) {
        PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
            ret);
        return ret;
    }

    return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif

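/*
 * Illustrative values (not part of the driver): fls(0) == 0, fls(1) == 1,
 * fls(7) == 3, fls(8) == 4. Hence fls(n - 1) is the number of bits needed
 * to index n entries when n is a power of two, which is exactly how the
 * DGLORT decoder lengths are derived below.
 */
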
static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct rte_eth_vmdq_rx_conf *vmdq_conf;
    uint32_t i;

    vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

    for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
        if (!vmdq_conf->pool_map[i].pools)
            continue;
        fm10k_mbx_lock(hw);
        fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
        fm10k_mbx_unlock(hw);
    }
}

static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /* Add default mac address */
    fm10k_MAC_filter_set(dev, hw->mac.addr, true,
        MAIN_VSI_POOL_NUMBER);
}

static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
    uint32_t mrqc, *key, i, reta, j;
    uint64_t hf;

#define RSS_KEY_SIZE 40
    static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
    };

    if (dev->data->nb_rx_queues == 1 ||
        dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
        dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
        return;

    /* random key is rss_intel_key (default) or user provided (rss_key) */
    if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
        key = (uint32_t *)rss_intel_key;
    else
        key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

    /* Now fill our hash function seeds, 4 bytes at a time */
    for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
        FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

    /*
     * Fill in redirection table
     * The byte-swap is needed because NIC registers are in
     * little-endian order.
     */
    reta = 0;
    for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
        if (j == dev->data->nb_rx_queues)
            j = 0;
        reta = (reta << CHAR_BIT) | j;
        if ((i & 3) == 3)
            FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                    rte_bswap32(reta));
    }

    /*
     * Generate RSS hash based on packet types, TCP/UDP
     * port numbers and/or IPv4/v6 src and dst addresses
     */
    hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
    mrqc = 0;
    mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
    mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
    mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
    mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;

    if (mrqc == 0) {
        PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
            " supported", hf);
        return;
    }

    FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

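/*
 * Illustrative walk-through (not part of the driver): with 4 Rx queues,
 * the loop above packs queue indices one byte per entry, four entries per
 * 32-bit RETA register. For i = 0..3 (j = 0,1,2,3) it accumulates
 *
 *     reta = 0x00010203
 *
 * and rte_bswap32() stores it as 0x03020100, i.e. entry 0 lands in the
 * least significant byte of FM10K_RETA(0, 0).
 */
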
static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t i;

    for (i = 0; i < nb_lport_new; i++) {
        /* Set unicast mode by default. App can change
         * to other mode in other API func.
         */
        fm10k_mbx_lock(hw);
        hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
            FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);
    }
}

static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct rte_eth_vmdq_rx_conf *vmdq_conf;
    struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
    struct fm10k_macvlan_filter_info *macvlan;
    uint16_t nb_queue_pools = 0; /* pool number in configuration */
    uint16_t nb_lport_new;

    macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
    vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

    fm10k_dev_rss_configure(dev);

    /* only PF supports VMDQ */
    if (hw->mac.type != fm10k_mac_pf)
        return;

    if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
        nb_queue_pools = vmdq_conf->nb_queue_pools;

    /* no pool number change, no need to update logic port and VLAN/MAC */
    if (macvlan->nb_queue_pools == nb_queue_pools)
        return;

    nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
    fm10k_dev_logic_port_update(dev, nb_lport_new);

    /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
    memset(dev->data->mac_addrs, 0,
        ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
    ether_addr_copy((const struct ether_addr *)hw->mac.addr,
        &dev->data->mac_addrs[0]);
    memset(macvlan, 0, sizeof(*macvlan));
    macvlan->nb_queue_pools = nb_queue_pools;

    if (nb_queue_pools)
        fm10k_dev_vmdq_rx_configure(dev);
    else
        fm10k_dev_pf_main_vsi_reset(dev);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    int i, ret = 0;
    struct fm10k_tx_queue *txq;
    uint64_t base_addr;
    uint32_t size;

    /* Disable TXINT to avoid possible interrupt */
    for (i = 0; i < hw->mac.max_queues; i++)
        FM10K_WRITE_REG(hw, FM10K_TXINT(i),
            3 << FM10K_TXINT_TIMER_SHIFT);

    /* Setup TX queues */
    for (i = 0; i < dev->data->nb_tx_queues; ++i) {
        txq = dev->data->tx_queues[i];
        base_addr = txq->hw_ring_phys_addr;
        size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

        /* disable queue to avoid issues while updating state */
        ret = tx_queue_disable(hw, i);
        if (ret) {
            PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
            return -1;
        }

        /* set location and size for descriptor ring */
        FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
            base_addr & UINT64_LOWER_32BITS_MASK);
        FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
            base_addr >> (CHAR_BIT * sizeof(uint32_t)));
        FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
    }

    /* set up vector or scalar TX function as appropriate */
    fm10k_set_tx_function(dev);

    return 0;
}

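/*
 * Illustrative arithmetic (not part of the driver): the descriptor ring
 * base is a 64-bit DMA address split across two 32-bit registers. For
 * example, with base_addr = 0x0000000123456000:
 *
 *     base_addr & UINT64_LOWER_32BITS_MASK       == 0x23456000  -> TDBAL
 *     base_addr >> (CHAR_BIT * sizeof(uint32_t)) == 0x00000001  -> TDBAH
 */
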
static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    int i, ret = 0;
    struct fm10k_rx_queue *rxq;
    uint64_t base_addr;
    uint32_t size;
    uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
    uint16_t buf_size;

    /* Disable RXINT to avoid possible interrupt */
    for (i = 0; i < hw->mac.max_queues; i++)
        FM10K_WRITE_REG(hw, FM10K_RXINT(i),
            3 << FM10K_RXINT_TIMER_SHIFT);

    /* Setup RX queues */
    for (i = 0; i < dev->data->nb_rx_queues; ++i) {
        rxq = dev->data->rx_queues[i];
        base_addr = rxq->hw_ring_phys_addr;
        size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

        /* disable queue to avoid issues while updating state */
        ret = rx_queue_disable(hw, i);
        if (ret) {
            PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
            return -1;
        }

        /* Setup the Base and Length of the Rx Descriptor Ring */
        FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
            base_addr & UINT64_LOWER_32BITS_MASK);
        FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
            base_addr >> (CHAR_BIT * sizeof(uint32_t)));
        FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

        /* Configure the Rx buffer size for one buff without split */
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
            RTE_PKTMBUF_HEADROOM);
        /* As RX buffer is aligned to 512B within mbuf, some bytes are
         * reserved for this purpose, and the worst case could be 511B.
         * But SRR reg assumes all buffers have the same size. In order
         * to fill the gap, we'll have to consider the worst case and
         * assume 512B is reserved. If we don't do so, it's possible
         * for HW to overwrite data to next mbuf.
         */
        buf_size -= FM10K_RX_DATABUF_ALIGN;

        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
            buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

        /* It adds dual VLAN length for supporting dual VLAN */
        if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                dev->data->dev_conf.rxmode.enable_scatter) {
            uint32_t reg;
            dev->data->scattered_rx = 1;
            reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
            reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
            FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
        }

        /* Enable drop on empty, it's RO for VF */
        if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
            rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

        FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
        FM10K_WRITE_FLUSH(hw);
    }

    /* Configure VMDQ/RSS if applicable */
    fm10k_dev_mq_rx_configure(dev);

    /* Decide the best RX function */
    fm10k_set_rx_function(dev);

    return 0;
}

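/*
 * Illustrative arithmetic (not part of the driver): for a mempool created
 * with RTE_MBUF_DEFAULT_BUF_SIZE (2048B of data room plus headroom), the
 * usable Rx buffer advertised to hardware becomes
 *
 *     buf_size = 2048 - FM10K_RX_DATABUF_ALIGN   // 2048 - 512 = 1536
 *
 * so a default 1518-byte max_rx_pkt_len plus two VLAN tags (1526B) still
 * fits in one buffer, while anything larger (or enable_scatter) turns on
 * buffer chaining above.
 */
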
static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    int err = -1;
    uint32_t reg;
    struct fm10k_rx_queue *rxq;

    PMD_INIT_FUNC_TRACE();

    if (rx_queue_id < dev->data->nb_rx_queues) {
        rxq = dev->data->rx_queues[rx_queue_id];
        err = rx_queue_reset(rxq);
        if (err == -ENOMEM) {
            PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
            return err;
        } else if (err == -EINVAL) {
            PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
                " %d", err);
            return err;
        }

        /* Setup the HW Rx Head and Tail Descriptor Pointers
         * Note: this must be done AFTER the queue is enabled on real
         * hardware, but BEFORE the queue is enabled when using the
         * emulation platform. Do it in both places for now and remove
         * this comment and the following two register writes when the
         * emulation platform is no longer being used.
         */
        FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
        FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

        /* Set PF ownership flag for PF devices */
        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
        if (hw->mac.type == fm10k_mac_pf)
            reg |= FM10K_RXQCTL_PF;
        reg |= FM10K_RXQCTL_ENABLE;
        /* enable RX queue */
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
        FM10K_WRITE_FLUSH(hw);

        /* Setup the HW Rx Head and Tail Descriptor Pointers
         * Note: this must be done AFTER the queue is enabled
         */
        FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
        FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
    }

    return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    PMD_INIT_FUNC_TRACE();

    if (rx_queue_id < dev->data->nb_rx_queues) {
        /* Disable RX queue */
        rx_queue_disable(hw, rx_queue_id);

        /* Free mbuf and clean HW ring */
        rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
    }

    return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
    uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
    int err = 0;

    PMD_INIT_FUNC_TRACE();

    if (tx_queue_id < dev->data->nb_tx_queues) {
        struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

        q->ops->reset(q);

        /* reset head and tail pointers */
        FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
        FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

        /* enable TX queue */
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
            FM10K_TXDCTL_ENABLE | txdctl);
        FM10K_WRITE_FLUSH(hw);
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
    } else
        err = -1;

    return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    PMD_INIT_FUNC_TRACE();

    if (tx_queue_id < dev->data->nb_tx_queues) {
        tx_queue_disable(hw, tx_queue_id);
        tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
    }

    return 0;
}

static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
    return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
        != FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    int status;

    PMD_INIT_FUNC_TRACE();

    /* Return if it didn't acquire valid glort range */
    if (!fm10k_glort_valid(hw))
        return;

    fm10k_mbx_lock(hw);
    status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
        FM10K_XCAST_MODE_PROMISC);
    fm10k_mbx_unlock(hw);

    if (status != FM10K_SUCCESS)
        PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint8_t mode;
    int status;

    PMD_INIT_FUNC_TRACE();

    /* Return if it didn't acquire valid glort range */
    if (!fm10k_glort_valid(hw))
        return;

    if (dev->data->all_multicast == 1)
        mode = FM10K_XCAST_MODE_ALLMULTI;
    else
        mode = FM10K_XCAST_MODE_NONE;

    fm10k_mbx_lock(hw);
    status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
        mode);
    fm10k_mbx_unlock(hw);

    if (status != FM10K_SUCCESS)
        PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    int status;

    PMD_INIT_FUNC_TRACE();

    /* Return if it didn't acquire valid glort range */
    if (!fm10k_glort_valid(hw))
        return;

    /* If promiscuous mode is enabled, it doesn't make sense to enable
     * allmulticast and disable promiscuous since fm10k can only select
     * one of the modes.
     */
    if (dev->data->promiscuous) {
        PMD_INIT_LOG(INFO, "Promiscuous mode is enabled; "
            "no need to enable allmulticast");
        return;
    }

    fm10k_mbx_lock(hw);
    status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
        FM10K_XCAST_MODE_ALLMULTI);
    fm10k_mbx_unlock(hw);

    if (status != FM10K_SUCCESS)
        PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    int status;

    PMD_INIT_FUNC_TRACE();

    /* Return if it didn't acquire valid glort range */
    if (!fm10k_glort_valid(hw))
        return;

    if (dev->data->promiscuous) {
        PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
            "since promisc mode is enabled");
        return;
    }

    fm10k_mbx_lock(hw);
    /* Change mode to unicast mode */
    status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
        FM10K_XCAST_MODE_NONE);
    fm10k_mbx_unlock(hw);

    if (status != FM10K_SUCCESS)
        PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

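/*
 * Illustrative summary (not part of the driver): the xcast mode requested
 * over the mailbox is a single exclusive state per logical port, which is
 * why the helpers above refuse conflicting transitions:
 *
 *     FM10K_XCAST_MODE_NONE      unicast only (default)
 *     FM10K_XCAST_MODE_MULTI     unicast + subscribed multicast
 *     FM10K_XCAST_MODE_ALLMULTI  unicast + all multicast
 *     FM10K_XCAST_MODE_PROMISC   everything
 */
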
static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
    uint16_t nb_queue_pools;
    struct fm10k_macvlan_filter_info *macvlan;

    macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
    nb_queue_pools = macvlan->nb_queue_pools;
    pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
    rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;

    /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
    dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
    dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
            hw->mac.dglort_map;
    FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
    /* Configure VMDQ/RSS DGlort Decoder */
    FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

    /* Flow Director configurations, only queue number is valid. */
    dglortdec = fls(dev->data->nb_rx_queues - 1);
    dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
            (hw->mac.dglort_map + GLORT_FD_Q_BASE);
    FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
    FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);

    /* Invalidate all other GLORT entries */
    for (i = 2; i < FM10K_DGLORT_COUNT; i++)
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
            FM10K_DGLORTMAP_NONE);
}

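/*
 * Illustrative worked example (not part of the driver): with 8 VMDQ pools
 * spread over 32 Rx queues, the decoder lengths come out as
 *
 *     pool_len  = fls(8 - 1)      = 3   // glort bits used to select a pool
 *     rss_len   = fls(32 - 1) - 3 = 2   // glort bits left for RSS spread
 *     dglortdec = (2 << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | 3
 *
 * i.e. 5 decoded bits in total, matching the 32 queues behind DGLORTMAP(0).
 */
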
#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)

static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    int i, diag;

    PMD_INIT_FUNC_TRACE();

    /* stop, init, then start the hw */
    diag = fm10k_stop_hw(hw);
    if (diag != FM10K_SUCCESS) {
        PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
        return -EIO;
    }

    diag = fm10k_init_hw(hw);
    if (diag != FM10K_SUCCESS) {
        PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
        return -EIO;
    }

    diag = fm10k_start_hw(hw);
    if (diag != FM10K_SUCCESS) {
        PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
        return -EIO;
    }

    diag = fm10k_dev_tx_init(dev);
    if (diag) {
        PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
        return diag;
    }

    diag = fm10k_dev_rx_init(dev);
    if (diag) {
        PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
        return diag;
    }

    if (hw->mac.type == fm10k_mac_pf)
        fm10k_dev_dglort_map_configure(dev);

    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        struct fm10k_rx_queue *rxq;
        rxq = dev->data->rx_queues[i];

        if (rxq->rx_deferred_start)
            continue;
        diag = fm10k_dev_rx_queue_start(dev, i);
        if (diag != 0) {
            int j;
            for (j = 0; j < i; ++j)
                rx_queue_clean(dev->data->rx_queues[j]);
            return diag;
        }
    }

    for (i = 0; i < dev->data->nb_tx_queues; i++) {
        struct fm10k_tx_queue *txq;
        txq = dev->data->tx_queues[i];

        if (txq->tx_deferred_start)
            continue;
        diag = fm10k_dev_tx_queue_start(dev, i);
        if (diag != 0) {
            int j;
            for (j = 0; j < i; ++j)
                tx_queue_clean(dev->data->tx_queues[j]);
            for (j = 0; j < dev->data->nb_rx_queues; ++j)
                rx_queue_clean(dev->data->rx_queues[j]);
            return diag;
        }
    }

    /* Update default vlan when not in VMDQ mode */
    if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
        fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

    return 0;
}

static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
    int i;

    PMD_INIT_FUNC_TRACE();

    if (dev->data->tx_queues)
        for (i = 0; i < dev->data->nb_tx_queues; i++)
            fm10k_dev_tx_queue_stop(dev, i);

    if (dev->data->rx_queues)
        for (i = 0; i < dev->data->nb_rx_queues; i++)
            fm10k_dev_rx_queue_stop(dev, i);
}

static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
    int i;

    PMD_INIT_FUNC_TRACE();

    if (dev->data->tx_queues) {
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
            struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

            tx_queue_free(txq);
        }
    }

    if (dev->data->rx_queues) {
        for (i = 0; i < dev->data->nb_rx_queues; i++)
            fm10k_rx_queue_release(dev->data->rx_queues[i]);
    }
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    PMD_INIT_FUNC_TRACE();

    fm10k_mbx_lock(hw);
    hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
        MAX_LPORT_NUM, false);
    fm10k_mbx_unlock(hw);

    /* Stop mailbox service first */
    fm10k_close_mbx_service(hw);
    fm10k_dev_stop(dev);
    fm10k_dev_queue_release(dev);
    fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
    __rte_unused int wait_to_complete)
{
    PMD_INIT_FUNC_TRACE();

    /* The host-interface link is always up. The speed is ~50Gbps per Gen3
     * x8 PCIe interface. For now, we leave the speed undefined since there
     * is no 50Gbps Ethernet.
     */
    dev->data->dev_link.link_speed = 0;
    dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
    dev->data->dev_link.link_status = 1;

    return 0;
}

static int
fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
    unsigned n)
{
    struct fm10k_hw_stats *hw_stats =
        FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
    unsigned i, q, count = 0;

    if (n < FM10K_NB_XSTATS)
        return FM10K_NB_XSTATS;

    /* Global stats */
    for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
        snprintf(xstats[count].name, sizeof(xstats[count].name),
            "%s", fm10k_hw_stats_strings[count].name);
        xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
            fm10k_hw_stats_strings[count].offset);
        count++;
    }

    /* PF queue stats */
    for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
        for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
            snprintf(xstats[count].name, sizeof(xstats[count].name),
                "rx_q%u_%s", q,
                fm10k_hw_stats_rx_q_strings[i].name);
            xstats[count].value =
                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                fm10k_hw_stats_rx_q_strings[i].offset);
            count++;
        }
        for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
            snprintf(xstats[count].name, sizeof(xstats[count].name),
                "tx_q%u_%s", q,
                fm10k_hw_stats_tx_q_strings[i].name);
            xstats[count].value =
                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                fm10k_hw_stats_tx_q_strings[i].offset);
            count++;
        }
    }

    return FM10K_NB_XSTATS;
}

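/*
 * Illustrative usage (not part of the driver): since the handler returns
 * the required count when n is too small, callers can size the array by
 * probing first, e.g.:
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);  // required entry count
 *     struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
 *     rte_eth_xstats_get(port_id, xs, n);
 */
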
static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
    uint64_t ipackets, opackets, ibytes, obytes;
    struct fm10k_hw *hw =
        FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct fm10k_hw_stats *hw_stats =
        FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
    int i;

    PMD_INIT_FUNC_TRACE();

    fm10k_update_hw_stats(hw, hw_stats);

    ipackets = opackets = ibytes = obytes = 0;
    for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
            (i < hw->mac.max_queues); ++i) {
        stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
        stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
        stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
        stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
        ipackets += stats->q_ipackets[i];
        opackets += stats->q_opackets[i];
        ibytes += stats->q_ibytes[i];
        obytes += stats->q_obytes[i];
    }
    stats->ipackets = ipackets;
    stats->opackets = opackets;
    stats->ibytes = ibytes;
    stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct fm10k_hw_stats *hw_stats =
        FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

    PMD_INIT_FUNC_TRACE();

    memset(hw_stats, 0, sizeof(*hw_stats));
    fm10k_rebind_hw_stats(hw, hw_stats);
}

static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
    struct rte_eth_dev_info *dev_info)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    PMD_INIT_FUNC_TRACE();

    dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
    dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
    dev_info->max_rx_queues = hw->mac.max_queues;
    dev_info->max_tx_queues = hw->mac.max_queues;
    dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
    dev_info->max_hash_mac_addrs = 0;
    dev_info->max_vfs = dev->pci_dev->max_vfs;
    dev_info->vmdq_pool_base = 0;
    dev_info->vmdq_queue_base = 0;
    dev_info->max_vmdq_pools = ETH_32_POOLS;
    dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
    dev_info->rx_offload_capa =
        DEV_RX_OFFLOAD_VLAN_STRIP |
        DEV_RX_OFFLOAD_IPV4_CKSUM |
        DEV_RX_OFFLOAD_UDP_CKSUM |
        DEV_RX_OFFLOAD_TCP_CKSUM;
    dev_info->tx_offload_capa =
        DEV_TX_OFFLOAD_VLAN_INSERT |
        DEV_TX_OFFLOAD_IPV4_CKSUM |
        DEV_TX_OFFLOAD_UDP_CKSUM |
        DEV_TX_OFFLOAD_TCP_CKSUM |
        DEV_TX_OFFLOAD_TCP_TSO;

    dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
    dev_info->reta_size = FM10K_MAX_RSS_INDICES;

    dev_info->default_rxconf = (struct rte_eth_rxconf) {
        .rx_thresh = {
            .pthresh = FM10K_DEFAULT_RX_PTHRESH,
            .hthresh = FM10K_DEFAULT_RX_HTHRESH,
            .wthresh = FM10K_DEFAULT_RX_WTHRESH,
        },
        .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
        .rx_drop_en = 0,
    };

    dev_info->default_txconf = (struct rte_eth_txconf) {
        .tx_thresh = {
            .pthresh = FM10K_DEFAULT_TX_PTHRESH,
            .hthresh = FM10K_DEFAULT_TX_HTHRESH,
            .wthresh = FM10K_DEFAULT_TX_WTHRESH,
        },
        .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
        .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
        .txq_flags = FM10K_SIMPLE_TX_FLAG,
    };

    dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
        .nb_max = FM10K_MAX_RX_DESC,
        .nb_min = FM10K_MIN_RX_DESC,
        .nb_align = FM10K_MULT_RX_DESC,
    };

    dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
        .nb_max = FM10K_MAX_TX_DESC,
        .nb_min = FM10K_MIN_TX_DESC,
        .nb_align = FM10K_MULT_TX_DESC,
    };
}

static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
    s32 result;
    uint16_t mac_num = 0;
    uint32_t vid_idx, vid_bit, mac_index;
    struct fm10k_hw *hw;
    struct fm10k_macvlan_filter_info *macvlan;
    struct rte_eth_dev_data *data = dev->data;

    hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

    if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
        PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
        return -EINVAL;
    }

    if (vlan_id > ETH_VLAN_ID_MAX) {
        PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
        return -EINVAL;
    }

    vid_idx = FM10K_VFTA_IDX(vlan_id);
    vid_bit = FM10K_VFTA_BIT(vlan_id);
    /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
    if (on && (macvlan->vfta[vid_idx] & vid_bit))
        return 0;
    /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
    if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
        PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
            "in the VLAN filter table");
        return -EINVAL;
    }

    fm10k_mbx_lock(hw);
    result = fm10k_update_vlan(hw, vlan_id, 0, on);
    fm10k_mbx_unlock(hw);
    if (result != FM10K_SUCCESS) {
        PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
        return -EIO;
    }

    for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
            (result == FM10K_SUCCESS); mac_index++) {
        if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
            continue;
        if (mac_num > macvlan->mac_num - 1) {
            PMD_INIT_LOG(ERR, "MAC address number "
                "does not match");
            break;
        }
        fm10k_mbx_lock(hw);
        result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
            data->mac_addrs[mac_index].addr_bytes,
            vlan_id, on, 0);
        fm10k_mbx_unlock(hw);
        mac_num++;
    }
    if (result != FM10K_SUCCESS) {
        PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
        return -EIO;
    }

    if (on) {
        macvlan->vlan_num++;
        macvlan->vfta[vid_idx] |= vid_bit;
    } else {
        macvlan->vlan_num--;
        macvlan->vfta[vid_idx] &= ~vid_bit;
    }
    return 0;
}

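/*
 * Illustrative arithmetic (not part of the driver): the VFTA is a bitmap
 * with 32 VLAN IDs per 32-bit word, so for vlan_id = 100 one would expect
 *
 *     vid_idx = FM10K_VFTA_IDX(100) == 100 / 32 == 3
 *     vid_bit = FM10K_VFTA_BIT(100) == 1 << (100 % 32) == 1 << 4
 *
 * assuming the usual IDX/BIT macro definitions.
 */
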
static void
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
    if (mask & ETH_VLAN_STRIP_MASK) {
        if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
            PMD_INIT_LOG(ERR, "VLAN stripping is "
                "always on in fm10k");
    }

    if (mask & ETH_VLAN_EXTEND_MASK) {
        if (dev->data->dev_conf.rxmode.hw_vlan_extend)
            PMD_INIT_LOG(ERR, "VLAN QinQ is not "
                "supported in fm10k");
    }

    if (mask & ETH_VLAN_FILTER_MASK) {
        if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
            PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
    }
}

/* Add/Remove a MAC address, and update filters to main VSI */
static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
    const u8 *mac, bool add, uint32_t pool)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct fm10k_macvlan_filter_info *macvlan;
    uint32_t i, j, k;

    macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

    if (pool != MAIN_VSI_POOL_NUMBER) {
        PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
            "mac to pool %u", pool);
        return;
    }
    for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
        if (!macvlan->vfta[j])
            continue;
        for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
            if (!(macvlan->vfta[j] & (1 << k)))
                continue;
            if (i + 1 > macvlan->vlan_num) {
                PMD_INIT_LOG(ERR, "VLAN number does not match");
                return;
            }
            fm10k_mbx_lock(hw);
            fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
                j * FM10K_UINT32_BIT_SIZE + k, add, 0);
            fm10k_mbx_unlock(hw);
            i++;
        }
    }
}

/* Add/Remove a MAC address, and update filters to VMDQ */
static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
    const u8 *mac, bool add, uint32_t pool)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct fm10k_macvlan_filter_info *macvlan;
    struct rte_eth_vmdq_rx_conf *vmdq_conf;
    uint32_t i;

    macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
    vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

    if (pool > macvlan->nb_queue_pools) {
        PMD_DRV_LOG(ERR, "Pool number %u invalid."
            " Max pool is %u",
            pool, macvlan->nb_queue_pools);
        return;
    }
    for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
        if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
            continue;
        fm10k_mbx_lock(hw);
        fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
            vmdq_conf->pool_map[i].vlan_id, add, 0);
        fm10k_mbx_unlock(hw);
    }
}

/* Add/Remove a MAC address, and update filters */
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
    const u8 *mac, bool add, uint32_t pool)
{
    struct fm10k_macvlan_filter_info *macvlan;

    macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

    if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
        fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
    else
        fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);

    if (add)
        macvlan->mac_num++;
    else
        macvlan->mac_num--;
}

/* Add a MAC address, and update filters */
static void
fm10k_macaddr_add(struct rte_eth_dev *dev,
    struct ether_addr *mac_addr,
    uint32_t index,
    uint32_t pool)
{
    struct fm10k_macvlan_filter_info *macvlan;

    macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
    fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
    macvlan->mac_vmdq_id[index] = pool;
}

/* Remove a MAC address, and update filters */
static void
fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
    struct rte_eth_dev_data *data = dev->data;
    struct fm10k_macvlan_filter_info *macvlan;

    macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
    fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
        FALSE, macvlan->mac_vmdq_id[index]);
    macvlan->mac_vmdq_id[index] = 0;
}

static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
    if ((request < min) || (request > max) || ((request % mult) != 0))
        return -1;
    else
        return 0;
}

static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
    if ((request < min) || (request > max) || ((div % request) != 0))
        return -1;
    else
        return 0;
}

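/*
 * Illustrative note (not part of the driver): check_thresh() requires the
 * requested threshold to divide `div` evenly; e.g. with a ring of 512
 * descriptors a threshold of 32 passes (512 % 32 == 0) while 48 fails
 * (512 % 48 == 32).
 */
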
static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
    uint16_t rx_free_thresh;

    if (conf->rx_free_thresh == 0)
        rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
    else
        rx_free_thresh = conf->rx_free_thresh;

    /* make sure the requested threshold satisfies the constraints */
    if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
            FM10K_RX_FREE_THRESH_MAX(q),
            FM10K_RX_FREE_THRESH_DIV(q),
            rx_free_thresh)) {
        PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
            "less than or equal to %u, "
            "greater than or equal to %u, "
            "and a divisor of %u",
            rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
            FM10K_RX_FREE_THRESH_MIN(q),
            FM10K_RX_FREE_THRESH_DIV(q));
        return -EINVAL;
    }

    q->alloc_thresh = rx_free_thresh;
    q->drop_en = conf->rx_drop_en;
    q->rx_deferred_start = conf->rx_deferred_start;

    return 0;
}

/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
    uint32_t min_size;

    /* elt_size includes mbuf header and headroom */
    min_size = mp->elt_size - sizeof(struct rte_mbuf) -
            RTE_PKTMBUF_HEADROOM;

    /* account for up to 512B of alignment */
    min_size -= FM10K_RX_DATABUF_ALIGN;

    /* sanity check for overflow */
    if (min_size > mp->elt_size)
        return 0;

    /* size is valid */
    return 1;
}

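/*
 * Illustrative arithmetic (not part of the driver): for a pool created with
 * rte_pktmbuf_pool_create(..., RTE_MBUF_DEFAULT_BUF_SIZE, ...), elt_size is
 * roughly sizeof(struct rte_mbuf) + 128B headroom + 2048B of data room, so
 *
 *     min_size = 2048 - FM10K_RX_DATABUF_ALIGN = 1536
 *
 * which is positive, hence valid; the unsigned-underflow check above
 * catches pools whose elements are smaller than the 512B worst case.
 */
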
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
    uint16_t nb_desc, unsigned int socket_id,
    const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
    struct fm10k_rx_queue *q;
    const struct rte_memzone *mz;

    PMD_INIT_FUNC_TRACE();

    /* make sure the mempool element size can account for alignment. */
    if (!mempool_element_size_valid(mp)) {
        PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
        return -EINVAL;
    }

    /* make sure a valid number of descriptors have been requested */
    if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
            FM10K_MULT_RX_DESC, nb_desc)) {
        PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
            "less than or equal to %"PRIu32", "
            "greater than or equal to %u, "
            "and a multiple of %u",
            nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
            FM10K_MULT_RX_DESC);
        return -EINVAL;
    }

    /*
     * if this queue existed already, free the associated memory. The
     * queue cannot be reused in case we need to allocate memory on
     * different socket than was previously used.
     */
    if (dev->data->rx_queues[queue_id] != NULL) {
        rx_queue_free(dev->data->rx_queues[queue_id]);
        dev->data->rx_queues[queue_id] = NULL;
    }

    /* allocate memory for the queue structure */
    q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
            socket_id);
    if (q == NULL) {
        PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
        return -ENOMEM;
    }

    /* setup queue */
    q->mp = mp;
    q->nb_desc = nb_desc;
    q->nb_fake_desc = FM10K_MULT_RX_DESC;
    q->port_id = dev->data->port_id;
    q->queue_id = queue_id;
    q->tail_ptr = (volatile uint32_t *)
        &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
    if (handle_rxconf(q, conf))
        return -EINVAL;

    /* allocate memory for the software ring */
    q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
        (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
        RTE_CACHE_LINE_SIZE, socket_id);
    if (q->sw_ring == NULL) {
        PMD_INIT_LOG(ERR, "Cannot allocate software ring");
        rte_free(q);
        return -ENOMEM;
    }

    /*
     * allocate memory for the hardware descriptor ring. A memzone large
     * enough to hold the maximum ring size is requested to allow for
     * resizing in later calls to the queue setup function.
     */
    mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
            FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
            socket_id);
    if (mz == NULL) {
        PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
        rte_free(q->sw_ring);
        rte_free(q);
        return -ENOMEM;
    }
    q->hw_ring = mz->addr;
    q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);

    /* Check if number of descs satisfied Vector requirement */
    if (!rte_is_power_of_2(nb_desc)) {
        PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
            "preconditions - canceling the feature for "
            "the whole port[%d]",
            q->queue_id, q->port_id);
        dev_info->rx_vec_allowed = false;
    } else
        fm10k_rxq_vec_setup(q);

    dev->data->rx_queues[queue_id] = q;
    return 0;
}

static void
fm10k_rx_queue_release(void *queue)
{
    PMD_INIT_FUNC_TRACE();

    rx_queue_free(queue);
}

static inline int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
    uint16_t tx_free_thresh;
    uint16_t tx_rs_thresh;

    /* constraint MACROs require that tx_free_thresh is configured
     * before tx_rs_thresh */
    if (conf->tx_free_thresh == 0)
        tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
    else
        tx_free_thresh = conf->tx_free_thresh;

    /* make sure the requested threshold satisfies the constraints */
    if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
            FM10K_TX_FREE_THRESH_MAX(q),
            FM10K_TX_FREE_THRESH_DIV(q),
            tx_free_thresh)) {
        PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
            "less than or equal to %u, "
            "greater than or equal to %u, "
            "and a divisor of %u",
            tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
            FM10K_TX_FREE_THRESH_MIN(q),
            FM10K_TX_FREE_THRESH_DIV(q));
        return -EINVAL;
    }

    q->free_thresh = tx_free_thresh;

    if (conf->tx_rs_thresh == 0)
        tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
    else
        tx_rs_thresh = conf->tx_rs_thresh;

    q->tx_deferred_start = conf->tx_deferred_start;

    /* make sure the requested threshold satisfies the constraints */
    if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
            FM10K_TX_RS_THRESH_MAX(q),
            FM10K_TX_RS_THRESH_DIV(q),
            tx_rs_thresh)) {
        PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
            "less than or equal to %u, "
            "greater than or equal to %u, "
            "and a divisor of %u",
            tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
            FM10K_TX_RS_THRESH_MIN(q),
            FM10K_TX_RS_THRESH_DIV(q));
        return -EINVAL;
    }

    q->rs_thresh = tx_rs_thresh;

    return 0;
}

static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
    uint16_t nb_desc, unsigned int socket_id,
    const struct rte_eth_txconf *conf)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct fm10k_tx_queue *q;
    const struct rte_memzone *mz;

    PMD_INIT_FUNC_TRACE();

    /* make sure a valid number of descriptors have been requested */
    if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
            FM10K_MULT_TX_DESC, nb_desc)) {
        PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
            "less than or equal to %"PRIu32", "
            "greater than or equal to %u, "
            "and a multiple of %u",
            nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
            FM10K_MULT_TX_DESC);
        return -EINVAL;
    }

    /*
     * if this queue existed already, free the associated memory. The
     * queue cannot be reused in case we need to allocate memory on
     * different socket than was previously used.
     */
    if (dev->data->tx_queues[queue_id] != NULL) {
        struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];

        tx_queue_free(txq);
        dev->data->tx_queues[queue_id] = NULL;
    }

    /* allocate memory for the queue structure */
    q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
            socket_id);
    if (q == NULL) {
        PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
        return -ENOMEM;
    }

    /* setup queue */
    q->nb_desc = nb_desc;
    q->port_id = dev->data->port_id;
    q->queue_id = queue_id;
    q->txq_flags = conf->txq_flags;
    q->ops = &def_txq_ops;
    q->tail_ptr = (volatile uint32_t *)
        &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
    if (handle_txconf(q, conf))
        return -EINVAL;

    /* allocate memory for the software ring */
    q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
        nb_desc * sizeof(struct rte_mbuf *),
        RTE_CACHE_LINE_SIZE, socket_id);
    if (q->sw_ring == NULL) {
        PMD_INIT_LOG(ERR, "Cannot allocate software ring");
        rte_free(q);
        return -ENOMEM;
    }

    /*
     * allocate memory for the hardware descriptor ring. A memzone large
     * enough to hold the maximum ring size is requested to allow for
     * resizing in later calls to the queue setup function.
     */
    mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
            FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
            socket_id);
    if (mz == NULL) {
        PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
        rte_free(q->sw_ring);
        rte_free(q);
        return -ENOMEM;
    }
    q->hw_ring = mz->addr;
    q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);

    /*
     * allocate memory for the RS bit tracker. Enough slots to hold the
     * descriptor index for each RS bit needing to be set are required.
     */
    q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
        ((nb_desc + 1) / q->rs_thresh) *
        sizeof(uint16_t),
        RTE_CACHE_LINE_SIZE, socket_id);
    if (q->rs_tracker.list == NULL) {
        PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
        rte_free(q->sw_ring);
        rte_free(q);
        return -ENOMEM;
    }

    dev->data->tx_queues[queue_id] = q;
    return 0;
}

static void
fm10k_tx_queue_release(void *queue)
{
    struct fm10k_tx_queue *q = queue;
    PMD_INIT_FUNC_TRACE();

    tx_queue_free(q);
}

static int
fm10k_reta_update(struct rte_eth_dev *dev,
    struct rte_eth_rss_reta_entry64 *reta_conf,
    uint16_t reta_size)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint16_t i, j, idx, shift;
    uint8_t mask;
    uint32_t reta;

    PMD_INIT_FUNC_TRACE();

    if (reta_size > FM10K_MAX_RSS_INDICES) {
        PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
            "(%d) doesn't match the number supported by hardware "
            "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
        return -EINVAL;
    }

    /*
     * Update Redirection Table RETA[n], n=0..31. The redirection table has
     * 128 entries in 32 registers.
     */
    for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
        idx = i / RTE_RETA_GROUP_SIZE;
        shift = i % RTE_RETA_GROUP_SIZE;
        mask = (uint8_t)((reta_conf[idx].mask >> shift) &
                BIT_MASK_PER_UINT32);
        if (mask == 0)
            continue;

        reta = 0;
        /* partial update: keep the entries we are not rewriting */
        if (mask != BIT_MASK_PER_UINT32)
            reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));

        for (j = 0; j < CHARS_PER_UINT32; j++) {
            if (mask & (0x1 << j)) {
                if (mask != 0xF)
                    reta &= ~(UINT8_MAX << CHAR_BIT * j);
                reta |= reta_conf[idx].reta[shift + j] <<
                        (CHAR_BIT * j);
            }
        }
        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
    }

    return 0;
}

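/*
 * Illustrative walk-through (not part of the driver): to redirect only
 * RETA entry 5 to queue 2, the caller sets bit 5 of reta_conf[0].mask.
 * The loop iteration for i = 4 then sees mask = 0x2 (entry 5 is lane
 * j = 1), reads the current FM10K_RETA(0, 1) register, clears byte 1 and
 * ORs in (2 << CHAR_BIT), leaving the other three entries untouched.
 */
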
static int
fm10k_reta_query(struct rte_eth_dev *dev,
    struct rte_eth_rss_reta_entry64 *reta_conf,
    uint16_t reta_size)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint16_t i, j, idx, shift;
    uint8_t mask;
    uint32_t reta;

    PMD_INIT_FUNC_TRACE();

    if (reta_size < FM10K_MAX_RSS_INDICES) {
        PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
            "(%d) doesn't match the number supported by hardware "
            "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
        return -EINVAL;
    }

    /*
     * Read Redirection Table RETA[n], n=0..31. The redirection table has
     * 128 entries in 32 registers.
     */
    for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
        idx = i / RTE_RETA_GROUP_SIZE;
        shift = i % RTE_RETA_GROUP_SIZE;
        mask = (uint8_t)((reta_conf[idx].mask >> shift) &
                BIT_MASK_PER_UINT32);
        if (mask == 0)
            continue;

        reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
        for (j = 0; j < CHARS_PER_UINT32; j++) {
            if (mask & (0x1 << j))
                reta_conf[idx].reta[shift + j] = ((reta >>
                    CHAR_BIT * j) & UINT8_MAX);
        }
    }

    return 0;
}

static int
fm10k_rss_hash_update(struct rte_eth_dev *dev,
    struct rte_eth_rss_conf *rss_conf)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t *key = (uint32_t *)rss_conf->rss_key;
    uint32_t mrqc;
    uint64_t hf = rss_conf->rss_hf;
    int i;

    PMD_INIT_FUNC_TRACE();

    if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
            FM10K_RSSRK_ENTRIES_PER_REG)
        return -EINVAL;

    if (hf == 0)
        return -EINVAL;

    mrqc = 0;
    mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
    mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
    mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
    mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
    mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;

    /* If the mapping doesn't fit any supported, return */
    if (mrqc == 0)
        return -EINVAL;

    if (key != NULL)
        for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
            FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

    FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);

    return 0;
}

static int
fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
    struct rte_eth_rss_conf *rss_conf)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t *key = (uint32_t *)rss_conf->rss_key;
    uint32_t mrqc;
    uint64_t hf;
    int i;

    PMD_INIT_FUNC_TRACE();

    if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
            FM10K_RSSRK_ENTRIES_PER_REG)
        return -EINVAL;

    if (key != NULL)
        for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
            key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));

    mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
    hf = 0;
    hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
    hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
    hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
    hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
    hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
    hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
    hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
    hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
    hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;

    rss_conf->rss_hf = hf;

    return 0;
}

static void
fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
{
    struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

    /* Bind all local non-queue interrupt to vector 0 */

    FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
    FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
    FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
    FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
    FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
    FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);

    /* Enable misc causes */
    FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
        FM10K_EIMR_ENABLE(THI_FAULT) |
        FM10K_EIMR_ENABLE(FUM_FAULT) |
        FM10K_EIMR_ENABLE(MAILBOX) |
        FM10K_EIMR_ENABLE(SWITCHREADY) |
        FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
        FM10K_EIMR_ENABLE(SRAMERROR) |
        FM10K_EIMR_ENABLE(VFLR));

    /* Enable ITR 0 */
    FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
        FM10K_ITR_MASK_CLEAR);
    FM10K_WRITE_FLUSH(hw);
}

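/*
 * Illustrative note (not part of the driver): ITR(0) gates the vector the
 * misc causes were just bound to. FM10K_ITR_AUTOMASK is understood to
 * re-arm automatic masking after each interrupt, and FM10K_ITR_MASK_CLEAR
 * unmasks the vector; the disable path below writes FM10K_ITR_MASK_SET
 * instead.
 */
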
2100 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2102 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2103 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2107 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2108 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2109 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2110 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2111 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2112 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2114 /* Disable misc causes */
2115 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2116 FM10K_EIMR_DISABLE(THI_FAULT) |
2117 FM10K_EIMR_DISABLE(FUM_FAULT) |
2118 FM10K_EIMR_DISABLE(MAILBOX) |
2119 FM10K_EIMR_DISABLE(SWITCHREADY) |
2120 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2121 FM10K_EIMR_DISABLE(SRAMERROR) |
2122 FM10K_EIMR_DISABLE(VFLR));
2125 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2126 FM10K_WRITE_FLUSH(hw);
2130 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2132 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2133 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2135 /* Bind all local non-queue interrupt to vector 0 */
2138 /* Only INT 0 available, other 15 are reserved. */
2139 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2142 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2143 FM10K_ITR_MASK_CLEAR);
2144 FM10K_WRITE_FLUSH(hw);

static void
fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_DISABLE;

	/* Only INT 0 is available; the other 15 are reserved. */
	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);

	/* Disable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
	FM10K_WRITE_FLUSH(hw);
}
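
/*
 * Decode and log hardware faults signalled through EICR. Each fault unit
 * (PCA, THI, FUM) latches the fault details, which fm10k_get_fault()
 * retrieves from the shared code.
 */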
static inline void
fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
{
	struct fm10k_fault fault;
	int err;
	const char *estr = "Unknown error";

	/* Process PCA fault */
	if (eicr & FM10K_EICR_PCA_FAULT) {
		err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case PCA_NO_FAULT:
			estr = "PCA_NO_FAULT"; break;
		case PCA_UNMAPPED_ADDR:
			estr = "PCA_UNMAPPED_ADDR"; break;
		case PCA_BAD_QACCESS_PF:
			estr = "PCA_BAD_QACCESS_PF"; break;
		case PCA_BAD_QACCESS_VF:
			estr = "PCA_BAD_QACCESS_VF"; break;
		case PCA_MALICIOUS_REQ:
			estr = "PCA_MALICIOUS_REQ"; break;
		case PCA_POISONED_TLP:
			estr = "PCA_POISONED_TLP"; break;
		case PCA_TLP_ABORT:
			estr = "PCA_TLP_ABORT"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	/* Process THI fault */
	if (eicr & FM10K_EICR_THI_FAULT) {
		err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case THI_NO_FAULT:
			estr = "THI_NO_FAULT"; break;
		case THI_MAL_DIS_Q_FAULT:
			estr = "THI_MAL_DIS_Q_FAULT"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	/* Process FUM fault */
	if (eicr & FM10K_EICR_FUM_FAULT) {
		err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case FUM_NO_FAULT:
			estr = "FUM_NO_FAULT"; break;
		case FUM_UNMAPPED_ADDR:
			estr = "FUM_UNMAPPED_ADDR"; break;
		case FUM_POISONED_TLP:
			estr = "FUM_POISONED_TLP"; break;
		case FUM_BAD_VF_QACCESS:
			estr = "FUM_BAD_VF_QACCESS"; break;
		case FUM_ADD_DECODE_ERR:
			estr = "FUM_ADD_DECODE_ERR"; break;
		case FUM_RO_ERROR:
			estr = "FUM_RO_ERROR"; break;
		case FUM_QPRC_CRC_ERROR:
			estr = "FUM_QPRC_CRC_ERROR"; break;
		case FUM_CSR_TIMEOUT:
			estr = "FUM_CSR_TIMEOUT"; break;
		case FUM_INVALID_TYPE:
			estr = "FUM_INVALID_TYPE"; break;
		case FUM_INVALID_LENGTH:
			estr = "FUM_INVALID_LENGTH"; break;
		case FUM_INVALID_BE:
			estr = "FUM_INVALID_BE"; break;
		case FUM_INVALID_ALIGN:
			estr = "FUM_INVALID_ALIGN"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	return;
error:
	PMD_INIT_LOG(ERR, "Failed to handle fault event.");
}

/**
 * PF interrupt handler triggered by NIC for handling a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_pf(
			__rte_unused struct rte_intr_handle *handle,
			void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t cause, status;

	if (hw->mac.type != fm10k_mac_pf)
		return;

	cause = FM10K_READ_REG(hw, FM10K_EICR);

	/* Handle PCI fault cases */
	if (cause & FM10K_EICR_FAULT_MASK) {
		PMD_INIT_LOG(ERR, "INT: fault detected!");
		fm10k_dev_handle_fault(hw, cause);
	}

	/* Handle switch up/down */
	if (cause & FM10K_EICR_SWITCHNOTREADY)
		PMD_INIT_LOG(ERR, "INT: Switch is not ready");

	if (cause & FM10K_EICR_SWITCHREADY)
		PMD_INIT_LOG(INFO, "INT: Switch is ready");

	/* Handle mailbox message */
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	/* Handle SRAM error */
	if (cause & FM10K_EICR_SRAMERROR) {
		PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");

		status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
		/* Write to clear pending bits */
		FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);

		/* Todo: print out error message after shared code updates */
	}

	/* Clear these three events, if any are pending */
	cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
		FM10K_EICR_SWITCHREADY;
	if (cause)
		FM10K_WRITE_REG(hw, FM10K_EICR, cause);

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}

/**
 * VF interrupt handler triggered by NIC for handling a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 */
static void
fm10k_dev_interrupt_handler_vf(
			__rte_unused struct rte_intr_handle *handle,
			void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != fm10k_mac_vf)
		return;

	/* Handle mailbox message if lock is acquired */
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}

/* Mailbox message handlers in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

/* Mailbox message handlers in PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
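
/*
 * Bring up the mailbox: initialize its spinlock, install the PF- or
 * VF-specific handler table above in place of the defaults, then connect
 * to the peer (the Switch Manager for a PF, the PF for a VF).
 */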
static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err = 0;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);
	/* Replace default message handlers with new ones */
	if (hw->mac.type == fm10k_mac_pf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
	else
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler: err %d",
				err);
		return err;
	}
	/* Connect to SM for PF device or PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}

static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
	/* Disconnect from SM for PF device or PF for VF device */
	hw->mbx.ops.disconnect(hw, &hw->mbx);
}
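
/*
 * Callback table the ethdev layer dispatches through: generic rte_eth_*
 * control-path calls on an fm10k port resolve to one of these handlers.
 */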
static const struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure		= fm10k_dev_configure,
	.dev_start		= fm10k_dev_start,
	.dev_stop		= fm10k_dev_stop,
	.dev_close		= fm10k_dev_close,
	.promiscuous_enable	= fm10k_dev_promiscuous_enable,
	.promiscuous_disable	= fm10k_dev_promiscuous_disable,
	.allmulticast_enable	= fm10k_dev_allmulticast_enable,
	.allmulticast_disable	= fm10k_dev_allmulticast_disable,
	.stats_get		= fm10k_stats_get,
	.xstats_get		= fm10k_xstats_get,
	.stats_reset		= fm10k_stats_reset,
	.xstats_reset		= fm10k_stats_reset,
	.link_update		= fm10k_link_update,
	.dev_infos_get		= fm10k_dev_infos_get,
	.vlan_filter_set	= fm10k_vlan_filter_set,
	.vlan_offload_set	= fm10k_vlan_offload_set,
	.mac_addr_add		= fm10k_macaddr_add,
	.mac_addr_remove	= fm10k_macaddr_remove,
	.rx_queue_start		= fm10k_dev_rx_queue_start,
	.rx_queue_stop		= fm10k_dev_rx_queue_stop,
	.tx_queue_start		= fm10k_dev_tx_queue_start,
	.tx_queue_stop		= fm10k_dev_tx_queue_stop,
	.rx_queue_setup		= fm10k_rx_queue_setup,
	.rx_queue_release	= fm10k_rx_queue_release,
	.tx_queue_setup		= fm10k_tx_queue_setup,
	.tx_queue_release	= fm10k_tx_queue_release,
	.rx_descriptor_done	= fm10k_dev_rx_descriptor_done,
	.reta_update		= fm10k_reta_update,
	.reta_query		= fm10k_reta_query,
	.rss_hash_update	= fm10k_rss_hash_update,
	.rss_hash_conf_get	= fm10k_rss_hash_conf_get,
};
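
/*
 * Select the Tx burst function: use the vector (SSE) path only if every
 * configured Tx queue passes the vector condition check, otherwise fall
 * back to the scalar fm10k_xmit_pkts().
 */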
static void __attribute__((cold))
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
	struct fm10k_tx_queue *txq;
	int i;
	int use_sse = 1;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Check if Vector Tx is satisfied */
		if (fm10k_tx_vec_condition_check(txq)) {
			use_sse = 0;
			break;
		}
	}

	if (use_sse) {
		PMD_INIT_LOG(DEBUG, "Use vector Tx func");
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			txq = dev->data->tx_queues[i];
			fm10k_txq_vec_setup(txq);
		}
		dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
	} else {
		dev->tx_pkt_burst = fm10k_xmit_pkts;
		PMD_INIT_LOG(DEBUG, "Use regular Tx func");
	}
}
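
/*
 * Select the Rx burst function among the scalar/vector and
 * scattered/non-scattered variants, and record the choice in each queue
 * so the Rx path can tell which flavour is active.
 */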
static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
	struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
	uint16_t i, rx_using_sse;

	/* In order to allow Vector Rx there are a few configuration
	 * conditions to be met.
	 */
	if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
		else
			dev->rx_pkt_burst = fm10k_recv_pkts_vec;
	} else if (dev->data->scattered_rx)
		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = fm10k_recv_pkts;

	rx_using_sse =
		(dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
		dev->rx_pkt_burst == fm10k_recv_pkts_vec);

	if (rx_using_sse)
		PMD_INIT_LOG(DEBUG, "Use vector Rx func");
	else
		PMD_INIT_LOG(DEBUG, "Use regular Rx func");

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];

		rxq->rx_using_sse = rx_using_sse;
	}
}

static void
fm10k_params_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);

	/* Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4. Until
	 * this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

	info->rx_vec_allowed = true;
}
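
/*
 * Per-device init: bind driver ops and default burst functions, map BAR0
 * registers, initialize the shared code and hardware, set up the MAC
 * address and mailbox service, hook the PF/VF interrupt handler, wait for
 * the Switch Manager (PF only), then enable the logical port and install
 * the default MAC filter.
 */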
static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag, i;
	struct fm10k_macvlan_filter_info *macvlan;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;

	/* only initialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, dev->pci_dev);

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	memset(macvlan, 0, sizeof(*macvlan));

	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = dev->pci_dev->id.device_id;
	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* Initialize parameters */
	fm10k_params_init(dev);

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k",
			ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	if (diag != FM10K_SUCCESS ||
		!is_valid_assigned_ether_addr(dev->data->mac_addrs)) {

		/* Generate a random addr */
		eth_random_addr(hw->mac.addr);
		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		ether_addr_copy((const struct ether_addr *)hw->mac.addr,
				&dev->data->mac_addrs[0]);
	}

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return -EIO;
	}

	/* PF/VF has different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* register callback func to eal lib */
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_pf, (void *)dev);

		/* enable MISC interrupt */
		fm10k_dev_enable_intr_pf(dev);
	} else { /* VF */
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);

		fm10k_dev_enable_intr_vf(dev);
	}

	/* Enable uio intr after callback registered */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
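
	/* Let the shared code refresh its interrupt moderation settings */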
	hw->mac.ops.update_int_moderator(hw);

	/* Make sure Switch Manager is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		int switch_ready = 0;

		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			fm10k_mbx_lock(hw);
			hw->mac.ops.get_host_state(hw, &switch_ready);
			fm10k_mbx_unlock(hw);
			if (switch_ready)
				break;
			/* Delay some time to acquire async LPORT_MAP info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (switch_ready == 0) {
			PMD_INIT_LOG(ERR, "switch is not ready");
			return -1;
		}
	}

	/*
	 * The calls below operate on the mailbox, so take the lock to avoid
	 * racing with the interrupt handler: pushing to the mailbox FIFO
	 * raises an interrupt at the PF/SM side, whose reply comes back as
	 * an interrupt to us and touches the mailbox FIFO again.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
		MAX_LPORT_NUM, 1);

	/* Set unicast mode by default; the application can switch to other
	 * modes via other API functions.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_NONE);

	fm10k_mbx_unlock(hw);

	/* Add default MAC address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);

	return 0;
}
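
/*
 * Per-device teardown: close the port, clear the burst functions, unwind
 * the PF/VF interrupt wiring and release the MAC address array.
 */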
static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	fm10k_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* disable uio/vfio intr */
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* PF/VF has different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* disable interrupt */
		fm10k_dev_disable_intr_pf(dev);

		/* unregister callback func to eal lib */
		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_pf, (void *)dev);
	} else {
		/* disable interrupt */
		fm10k_dev_disable_intr_vf(dev);

		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);
	}

	/* free MAC memory */
	if (dev->data->mac_addrs) {
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
	}

	memset(hw, 0, sizeof(*hw));

	return 0;
}

/*
 * The set of PCI devices this driver supports. This driver will enable both
 * PF and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};

static struct eth_driver rte_pmd_fm10k = {
	.pci_drv = {
		.name = "rte_pmd_fm10k",
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.eth_dev_uninit = eth_fm10k_dev_uninit,
	.dev_private_size = sizeof(struct fm10k_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_pmd_fm10k);
	return 0;
}

static struct rte_driver rte_fm10k_driver = {
	.type = PMD_PDEV,
	.init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);