4 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_memzone.h>
37 #include <rte_string_fns.h>
39 #include <rte_spinlock.h>
42 #include "base/fm10k_api.h"
44 /* Default delay to acquire mailbox lock */
45 #define FM10K_MBXLOCK_DELAY_US 20
46 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
48 #define MAIN_VSI_POOL_NUMBER 0
50 /* Maximum number of attempts to query switch status */
51 #define MAX_QUERY_SWITCH_STATE_TIMES 10
52 /* Wait interval to get switch status */
53 #define WAIT_SWITCH_MSG_US 100000
54 /* Number of chars per uint32 type */
55 #define CHARS_PER_UINT32 (sizeof(uint32_t))
56 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
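/* Illustrative note (not from the original sources): CHARS_PER_UINT32 is
 * sizeof(uint32_t) == 4, so BIT_MASK_PER_UINT32 evaluates to
 * (1 << 4) - 1 == 0xF, i.e. one mask bit per byte of a 32-bit RETA
 * register. The RETA update/query code further below uses it to test
 * whether all four 8-bit entries of a register are selected by the caller.
 */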
58 #define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
59 ETH_TXQ_FLAGS_NOOFFLOADS)
61 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
62 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
63 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
64 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
65 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
66 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
68 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
69 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
70 const u8 *mac, bool add, uint32_t pool);
71 static void fm10k_tx_queue_release(void *queue);
72 static void fm10k_rx_queue_release(void *queue);
73 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
74 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
76 struct fm10k_xstats_name_off {
77 char name[RTE_ETH_XSTATS_NAME_SIZE];
81 struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
82 {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
83 {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
84 {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
85 {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
86 {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
87 {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
88 {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
89 {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
93 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
94 sizeof(fm10k_hw_stats_strings[0]))
96 struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
97 {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
98 {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
99 {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
102 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
103 sizeof(fm10k_hw_stats_rx_q_strings[0]))
105 struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
106 {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
107 {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
110 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
111 sizeof(fm10k_hw_stats_tx_q_strings[0]))
113 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
114 (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
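/* Worked example (illustrative, assuming FM10K_MAX_QUEUES_PF == 128):
 * FM10K_NB_HW_XSTATS is 8 (the entries listed above), and each PF queue
 * adds 3 Rx + 2 Tx counters, so FM10K_NB_XSTATS = 8 + 128 * (3 + 2) = 648.
 * fm10k_xstats_get() below returns this count when the caller's buffer
 * is too small to hold all entries.
 */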
117 fm10k_mbx_initlock(struct fm10k_hw *hw)
119 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
123 fm10k_mbx_lock(struct fm10k_hw *hw)
125 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
126 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
130 fm10k_mbx_unlock(struct fm10k_hw *hw)
132 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
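/* Illustrative usage sketch (the pattern used throughout this file):
 * every mailbox-based operation is wrapped by the helpers above, e.g.
 *
 *	fm10k_mbx_lock(hw);
 *	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
 *			FM10K_XCAST_MODE_NONE);
 *	fm10k_mbx_unlock(hw);
 *
 * The lock is polled with FM10K_MBXLOCK_DELAY_US between attempts rather
 * than blocking, so it is intended to be held only for short mailbox
 * requests.
 */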
136 * reset queue to initial state, allocate software buffers used when starting
138 * return 0 on success
139 * return -ENOMEM if buffers cannot be allocated
140 * return -EINVAL if buffers do not satisfy alignment condition
143 rx_queue_reset(struct fm10k_rx_queue *q)
145 static const union fm10k_rx_desc zero = {{0} };
148 PMD_INIT_FUNC_TRACE();
150 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
154 for (i = 0; i < q->nb_desc; ++i) {
155 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
156 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
157 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
161 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
162 q->hw_ring[i].q.pkt_addr = dma_addr;
163 q->hw_ring[i].q.hdr_addr = dma_addr;
166 /* initialize extra software ring entries. Space for these extra
167 * entries is always allocated.
169 memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
170 for (i = 0; i < q->nb_fake_desc; ++i) {
171 q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
172 q->hw_ring[q->nb_desc + i] = zero;
177 q->next_trigger = q->alloc_thresh - 1;
178 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
179 q->rxrearm_start = 0;
186 * clean queue, descriptor rings, free software buffers used when stopping
190 rx_queue_clean(struct fm10k_rx_queue *q)
192 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
194 PMD_INIT_FUNC_TRACE();
196 /* zero descriptor rings */
197 for (i = 0; i < q->nb_desc; ++i)
198 q->hw_ring[i] = zero;
200 /* zero faked descriptors */
201 for (i = 0; i < q->nb_fake_desc; ++i)
202 q->hw_ring[q->nb_desc + i] = zero;
204 /* vPMD driver has a different way of releasing mbufs. */
205 if (q->rx_using_sse) {
206 fm10k_rx_queue_release_mbufs_vec(q);
210 /* free software buffers */
211 for (i = 0; i < q->nb_desc; ++i) {
213 rte_pktmbuf_free_seg(q->sw_ring[i]);
214 q->sw_ring[i] = NULL;
220 * free all queue memory used when releasing the queue (i.e. configure)
223 rx_queue_free(struct fm10k_rx_queue *q)
225 PMD_INIT_FUNC_TRACE();
227 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
230 rte_free(q->sw_ring);
239 * disable RX queue, wait until HW finishes the necessary flush operation
242 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
246 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
247 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
248 reg & ~FM10K_RXQCTL_ENABLE);
250 /* Wait 100us at most */
251 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
253 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
254 if (!(reg & FM10K_RXQCTL_ENABLE))
258 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
265 * reset queue to initial state, allocate software buffers used when starting
269 tx_queue_reset(struct fm10k_tx_queue *q)
271 PMD_INIT_FUNC_TRACE();
275 q->nb_free = q->nb_desc - 1;
276 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
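	/* Illustrative note: the RS-bit tracker holds one slot per rs_thresh
	 * descriptors (see the allocation in fm10k_tx_queue_setup()), e.g.
	 * with nb_desc = 512 and rs_thresh = 32 the FIFO is reset to
	 * (512 + 1) / 32 = 16 slots.
	 */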
277 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
281 * clean queue, descriptor rings, free software buffers used when stopping
285 tx_queue_clean(struct fm10k_tx_queue *q)
287 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
289 PMD_INIT_FUNC_TRACE();
291 /* zero descriptor rings */
292 for (i = 0; i < q->nb_desc; ++i)
293 q->hw_ring[i] = zero;
295 /* free software buffers */
296 for (i = 0; i < q->nb_desc; ++i) {
298 rte_pktmbuf_free_seg(q->sw_ring[i]);
299 q->sw_ring[i] = NULL;
305 * free all queue memory used when releasing the queue (i.e. configure)
308 tx_queue_free(struct fm10k_tx_queue *q)
310 PMD_INIT_FUNC_TRACE();
312 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
314 if (q->rs_tracker.list) {
315 rte_free(q->rs_tracker.list);
316 q->rs_tracker.list = NULL;
319 rte_free(q->sw_ring);
328 * disable TX queue, wait until HW finishes the necessary flush operation
331 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
335 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
336 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
337 reg & ~FM10K_TXDCTL_ENABLE);
339 /* Wait 100us at most */
340 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
342 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
343 if (!(reg & FM10K_TXDCTL_ENABLE))
347 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
354 fm10k_check_mq_mode(struct rte_eth_dev *dev)
356 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
357 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
358 struct rte_eth_vmdq_rx_conf *vmdq_conf;
359 uint16_t nb_rx_q = dev->data->nb_rx_queues;
361 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
363 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
364 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
368 if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
371 if (hw->mac.type == fm10k_mac_vf) {
372 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
376 /* Check VMDQ queue pool number */
377 if (vmdq_conf->nb_queue_pools >
378 sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
379 vmdq_conf->nb_queue_pools > nb_rx_q) {
380 PMD_INIT_LOG(ERR, "Too many queue pools: %d",
381 vmdq_conf->nb_queue_pools);
388 static const struct fm10k_txq_ops def_txq_ops = {
389 .reset = tx_queue_reset,
393 fm10k_dev_configure(struct rte_eth_dev *dev)
397 PMD_INIT_FUNC_TRACE();
399 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
400 PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
401 /* multiple queue mode checking */
402 ret = fm10k_check_mq_mode(dev);
404 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
412 /* fls = find last set bit = 32 minus the number of leading zeros */
414 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
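/* Illustrative examples: fls(0) == 0, fls(1) == 1, fls(7) == 3,
 * fls(8) == 4. fm10k_dev_dglort_map_configure() below uses it to size the
 * pool and RSS index fields of the DGLORT decoder.
 */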
418 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
420 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
421 struct rte_eth_vmdq_rx_conf *vmdq_conf;
424 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
426 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
427 if (!vmdq_conf->pool_map[i].pools)
430 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
431 fm10k_mbx_unlock(hw);
436 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
438 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
440 /* Add default mac address */
441 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
442 MAIN_VSI_POOL_NUMBER);
446 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
448 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
449 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
450 uint32_t mrqc, *key, i, reta, j;
453 #define RSS_KEY_SIZE 40
454 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
455 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
456 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
457 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
458 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
459 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
462 if (dev->data->nb_rx_queues == 1 ||
463 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
464 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
467 /* RSS key is rss_intel_key (default) or user provided (rss_key) */
468 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
469 key = (uint32_t *)rss_intel_key;
471 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
473 /* Now fill our hash function seeds, 4 bytes at a time */
474 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
475 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
478 * Fill in redirection table
479 * The byte-swap is needed because NIC registers are in
480 * little-endian order.
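	/* Illustrative packing example: each 32-bit RETA register holds four
	 * 8-bit queue indices. With 4 Rx queues the loop below accumulates
	 * 0x00010203 (entry 0 in the most-significant byte); the byte-swap
	 * mentioned above then stores it so that entry 0 lands in the low
	 * byte of the little-endian register.
	 */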
483 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
484 if (j == dev->data->nb_rx_queues)
486 reta = (reta << CHAR_BIT) | j;
488 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
493 * Generate RSS hash based on packet types, TCP/UDP
494 * port numbers and/or IPv4/v6 src and dst addresses
496 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
498 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
499 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
500 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
501 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
502 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
503 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
504 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
505 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
506 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
509 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
514 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
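	/* Illustrative example: with rss_hf = ETH_RSS_IPV4 |
	 * ETH_RSS_NONFRAG_IPV4_TCP, the assignments above yield
	 * mrqc = FM10K_MRQC_IPV4 | FM10K_MRQC_TCP_IPV4, i.e. hashing on the
	 * IPv4 addresses plus the TCP ports of IPv4/TCP flows.
	 */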
518 fm10k_dev_logic_port_update(struct rte_eth_dev *dev,
519 uint16_t nb_lport_old, uint16_t nb_lport_new)
521 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
525 /* Disable previous logic ports */
527 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
528 nb_lport_old, false);
529 /* Enable new logic ports */
530 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
532 fm10k_mbx_unlock(hw);
534 for (i = 0; i < nb_lport_new; i++) {
535 /* Set unicast mode by default. The application can change
536 * to another mode through other API calls.
539 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
540 FM10K_XCAST_MODE_NONE);
541 fm10k_mbx_unlock(hw);
546 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
548 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
549 struct rte_eth_vmdq_rx_conf *vmdq_conf;
550 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
551 struct fm10k_macvlan_filter_info *macvlan;
552 uint16_t nb_queue_pools = 0; /* pool number in configuration */
553 uint16_t nb_lport_new, nb_lport_old;
555 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
556 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
558 fm10k_dev_rss_configure(dev);
560 /* only PF supports VMDQ */
561 if (hw->mac.type != fm10k_mac_pf)
564 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
565 nb_queue_pools = vmdq_conf->nb_queue_pools;
567 /* no pool number change, no need to update logic port and VLAN/MAC */
568 if (macvlan->nb_queue_pools == nb_queue_pools)
571 nb_lport_old = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
572 nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
573 fm10k_dev_logic_port_update(dev, nb_lport_old, nb_lport_new);
575 /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
576 memset(dev->data->mac_addrs, 0,
577 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
578 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
579 &dev->data->mac_addrs[0]);
580 memset(macvlan, 0, sizeof(*macvlan));
581 macvlan->nb_queue_pools = nb_queue_pools;
584 fm10k_dev_vmdq_rx_configure(dev);
586 fm10k_dev_pf_main_vsi_reset(dev);
590 fm10k_dev_tx_init(struct rte_eth_dev *dev)
592 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
594 struct fm10k_tx_queue *txq;
598 /* Disable TXINT to avoid possible interrupt */
599 for (i = 0; i < hw->mac.max_queues; i++)
600 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
601 3 << FM10K_TXINT_TIMER_SHIFT);
604 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
605 txq = dev->data->tx_queues[i];
606 base_addr = txq->hw_ring_phys_addr;
607 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
609 /* disable queue to avoid issues while updating state */
610 ret = tx_queue_disable(hw, i);
612 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
616 /* set location and size for descriptor ring */
617 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
618 base_addr & UINT64_LOWER_32BITS_MASK);
619 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
620 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
621 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
624 /* set up vector or scalar TX function as appropriate */
625 fm10k_set_tx_function(dev);
631 fm10k_dev_rx_init(struct rte_eth_dev *dev)
633 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
635 struct fm10k_rx_queue *rxq;
638 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
641 /* Disable RXINT to avoid possible interrupt */
642 for (i = 0; i < hw->mac.max_queues; i++)
643 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
644 3 << FM10K_RXINT_TIMER_SHIFT);
646 /* Setup RX queues */
647 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
648 rxq = dev->data->rx_queues[i];
649 base_addr = rxq->hw_ring_phys_addr;
650 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
652 /* disable queue to avoid issues while updating state */
653 ret = rx_queue_disable(hw, i);
655 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
659 /* Setup the Base and Length of the Rx Descriptor Ring */
660 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
661 base_addr & UINT64_LOWER_32BITS_MASK);
662 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
663 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
664 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
666 /* Configure the Rx buffer size for one buff without split */
667 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
668 RTE_PKTMBUF_HEADROOM);
669 /* As RX buffer is aligned to 512B within mbuf, some bytes are
670 * reserved for this purpose, and the worst case could be 511B.
671 * But SRR reg assumes all buffers have the same size. In order
672 * to fill the gap, we'll have to consider the worst case and
673 * assume 512B is reserved. If we don't do so, it's possible
674 * for HW to overwrite data into the next mbuf.
676 buf_size -= FM10K_RX_DATABUF_ALIGN;
678 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
679 buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
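		/* Illustrative sizing example (assuming the mempool leaves a
		 * common 2048B of data room after the headroom): buf_size
		 * starts at 2048 and, after subtracting the 512B worst-case
		 * alignment reserve above, 1536B per buffer is what gets
		 * shifted by FM10K_SRRCTL_BSIZEPKT_SHIFT and written to
		 * SRRCTL.
		 */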
681 /* Add dual VLAN tag length to support dual VLAN */
682 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
683 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
684 dev->data->dev_conf.rxmode.enable_scatter) {
686 dev->data->scattered_rx = 1;
687 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
688 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
689 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
692 /* Enable drop on empty, it's RO for VF */
693 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
694 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
696 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
697 FM10K_WRITE_FLUSH(hw);
700 /* Configure VMDQ/RSS if applicable */
701 fm10k_dev_mq_rx_configure(dev);
703 /* Decide the best RX function */
704 fm10k_set_rx_function(dev);
710 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
712 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
715 struct fm10k_rx_queue *rxq;
717 PMD_INIT_FUNC_TRACE();
719 if (rx_queue_id < dev->data->nb_rx_queues) {
720 rxq = dev->data->rx_queues[rx_queue_id];
721 err = rx_queue_reset(rxq);
722 if (err == -ENOMEM) {
723 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
725 } else if (err == -EINVAL) {
726 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
731 /* Setup the HW Rx Head and Tail Descriptor Pointers
732 * Note: this must be done AFTER the queue is enabled on real
733 * hardware, but BEFORE the queue is enabled when using the
734 * emulation platform. Do it in both places for now and remove
735 * this comment and the following two register writes when the
736 * emulation platform is no longer being used.
738 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
739 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
741 /* Set PF ownership flag for PF devices */
742 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
743 if (hw->mac.type == fm10k_mac_pf)
744 reg |= FM10K_RXQCTL_PF;
745 reg |= FM10K_RXQCTL_ENABLE;
746 /* enable RX queue */
747 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
748 FM10K_WRITE_FLUSH(hw);
750 /* Setup the HW Rx Head and Tail Descriptor Pointers
751 * Note: this must be done AFTER the queue is enabled
753 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
754 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
755 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
762 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
764 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
766 PMD_INIT_FUNC_TRACE();
768 if (rx_queue_id < dev->data->nb_rx_queues) {
769 /* Disable RX queue */
770 rx_queue_disable(hw, rx_queue_id);
772 /* Free mbuf and clean HW ring */
773 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
774 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
781 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
783 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
784 /** @todo - this should be defined in the shared code */
785 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
786 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
789 PMD_INIT_FUNC_TRACE();
791 if (tx_queue_id < dev->data->nb_tx_queues) {
792 struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
796 /* reset head and tail pointers */
797 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
798 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
800 /* enable TX queue */
801 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
802 FM10K_TXDCTL_ENABLE | txdctl);
803 FM10K_WRITE_FLUSH(hw);
804 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
812 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
814 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
816 PMD_INIT_FUNC_TRACE();
818 if (tx_queue_id < dev->data->nb_tx_queues) {
819 tx_queue_disable(hw, tx_queue_id);
820 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
821 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
827 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
829 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
830 != FM10K_DGLORTMAP_NONE);
834 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
836 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
839 PMD_INIT_FUNC_TRACE();
841 /* Return if a valid glort range has not been acquired */
842 if (!fm10k_glort_valid(hw))
846 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
847 FM10K_XCAST_MODE_PROMISC);
848 fm10k_mbx_unlock(hw);
850 if (status != FM10K_SUCCESS)
851 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
855 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
857 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
861 PMD_INIT_FUNC_TRACE();
863 /* Return if a valid glort range has not been acquired */
864 if (!fm10k_glort_valid(hw))
867 if (dev->data->all_multicast == 1)
868 mode = FM10K_XCAST_MODE_ALLMULTI;
870 mode = FM10K_XCAST_MODE_NONE;
873 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
875 fm10k_mbx_unlock(hw);
877 if (status != FM10K_SUCCESS)
878 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
882 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
884 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
887 PMD_INIT_FUNC_TRACE();
889 /* Return if a valid glort range has not been acquired */
890 if (!fm10k_glort_valid(hw))
893 /* If promiscuous mode is enabled, it doesn't make sense to enable
894 * allmulticast and disable promiscuous since fm10k can only select
895 * one of the modes.
897 if (dev->data->promiscuous) {
898 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
899 "no need to enable allmulticast");
904 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
905 FM10K_XCAST_MODE_ALLMULTI);
906 fm10k_mbx_unlock(hw);
908 if (status != FM10K_SUCCESS)
909 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
913 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
915 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
918 PMD_INIT_FUNC_TRACE();
920 /* Return if a valid glort range has not been acquired */
921 if (!fm10k_glort_valid(hw))
924 if (dev->data->promiscuous) {
925 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
926 "since promisc mode is enabled");
931 /* Change mode to unicast mode */
932 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
933 FM10K_XCAST_MODE_NONE);
934 fm10k_mbx_unlock(hw);
936 if (status != FM10K_SUCCESS)
937 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
941 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
943 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
944 uint32_t dglortdec, pool_len, rss_len, i;
945 uint16_t nb_queue_pools;
946 struct fm10k_macvlan_filter_info *macvlan;
948 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
949 nb_queue_pools = macvlan->nb_queue_pools;
950 pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
951 rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
952 dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
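	/* Illustrative example (VMDQ + RSS): with 8 queue pools and 32 Rx
	 * queues, pool_len = fls(7) = 3 and rss_len = fls(31) - 3 = 2, so a
	 * DGLORT decodes into a 3-bit pool index plus a 2-bit RSS queue
	 * index; dglortdec packs both fields for the DGLORTDEC(0) register
	 * written below.
	 */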
954 /* Establish only MAP 0 as valid */
955 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
957 /* Configure VMDQ/RSS DGlort Decoder */
958 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
960 /* Invalidate all other GLORT entries */
961 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
962 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
963 FM10K_DGLORTMAP_NONE);
966 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
968 fm10k_dev_start(struct rte_eth_dev *dev)
970 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
973 PMD_INIT_FUNC_TRACE();
975 /* stop, init, then start the hw */
976 diag = fm10k_stop_hw(hw);
977 if (diag != FM10K_SUCCESS) {
978 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
982 diag = fm10k_init_hw(hw);
983 if (diag != FM10K_SUCCESS) {
984 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
988 diag = fm10k_start_hw(hw);
989 if (diag != FM10K_SUCCESS) {
990 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
994 diag = fm10k_dev_tx_init(dev);
996 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1000 diag = fm10k_dev_rx_init(dev);
1002 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1006 if (hw->mac.type == fm10k_mac_pf)
1007 fm10k_dev_dglort_map_configure(dev);
1009 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1010 struct fm10k_rx_queue *rxq;
1011 rxq = dev->data->rx_queues[i];
1013 if (rxq->rx_deferred_start)
1015 diag = fm10k_dev_rx_queue_start(dev, i);
1018 for (j = 0; j < i; ++j)
1019 rx_queue_clean(dev->data->rx_queues[j]);
1024 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1025 struct fm10k_tx_queue *txq;
1026 txq = dev->data->tx_queues[i];
1028 if (txq->tx_deferred_start)
1030 diag = fm10k_dev_tx_queue_start(dev, i);
1033 for (j = 0; j < i; ++j)
1034 tx_queue_clean(dev->data->tx_queues[j]);
1035 for (j = 0; j < dev->data->nb_rx_queues; ++j)
1036 rx_queue_clean(dev->data->rx_queues[j]);
1041 /* Update default vlan when not in VMDQ mode */
1042 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1043 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1049 fm10k_dev_stop(struct rte_eth_dev *dev)
1053 PMD_INIT_FUNC_TRACE();
1055 if (dev->data->tx_queues)
1056 for (i = 0; i < dev->data->nb_tx_queues; i++)
1057 fm10k_dev_tx_queue_stop(dev, i);
1059 if (dev->data->rx_queues)
1060 for (i = 0; i < dev->data->nb_rx_queues; i++)
1061 fm10k_dev_rx_queue_stop(dev, i);
1065 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1069 PMD_INIT_FUNC_TRACE();
1071 if (dev->data->tx_queues) {
1072 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1073 struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1079 if (dev->data->rx_queues) {
1080 for (i = 0; i < dev->data->nb_rx_queues; i++)
1081 fm10k_rx_queue_release(dev->data->rx_queues[i]);
1086 fm10k_dev_close(struct rte_eth_dev *dev)
1088 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1090 struct fm10k_macvlan_filter_info *macvlan;
1092 PMD_INIT_FUNC_TRACE();
1094 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1095 nb_lport = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
1097 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1099 fm10k_mbx_unlock(hw);
1101 /* Stop mailbox service first */
1102 fm10k_close_mbx_service(hw);
1103 fm10k_dev_stop(dev);
1104 fm10k_dev_queue_release(dev);
1109 fm10k_link_update(struct rte_eth_dev *dev,
1110 __rte_unused int wait_to_complete)
1112 PMD_INIT_FUNC_TRACE();
1114 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
1115 * x8 PCIe interface. For now, we leave the speed undefined since there
1116 * is no 50Gbps Ethernet. */
1117 dev->data->dev_link.link_speed = 0;
1118 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1119 dev->data->dev_link.link_status = 1;
1125 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
1128 struct fm10k_hw_stats *hw_stats =
1129 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1130 unsigned i, q, count = 0;
1132 if (n < FM10K_NB_XSTATS)
1133 return FM10K_NB_XSTATS;
1136 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1137 snprintf(xstats[count].name, sizeof(xstats[count].name),
1138 "%s", fm10k_hw_stats_strings[count].name);
1139 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1140 fm10k_hw_stats_strings[count].offset);
1144 /* PF queue stats */
1145 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1146 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1147 snprintf(xstats[count].name, sizeof(xstats[count].name),
1149 fm10k_hw_stats_rx_q_strings[i].name);
1150 xstats[count].value =
1151 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1152 fm10k_hw_stats_rx_q_strings[i].offset);
1155 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1156 snprintf(xstats[count].name, sizeof(xstats[count].name),
1158 fm10k_hw_stats_tx_q_strings[i].name);
1159 xstats[count].value =
1160 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1161 fm10k_hw_stats_tx_q_strings[i].offset);
1166 return FM10K_NB_XSTATS;
1170 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1172 uint64_t ipackets, opackets, ibytes, obytes;
1173 struct fm10k_hw *hw =
1174 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1175 struct fm10k_hw_stats *hw_stats =
1176 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1179 PMD_INIT_FUNC_TRACE();
1181 fm10k_update_hw_stats(hw, hw_stats);
1183 ipackets = opackets = ibytes = obytes = 0;
1184 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1185 (i < hw->mac.max_queues); ++i) {
1186 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1187 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1188 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
1189 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
1190 ipackets += stats->q_ipackets[i];
1191 opackets += stats->q_opackets[i];
1192 ibytes += stats->q_ibytes[i];
1193 obytes += stats->q_obytes[i];
1195 stats->ipackets = ipackets;
1196 stats->opackets = opackets;
1197 stats->ibytes = ibytes;
1198 stats->obytes = obytes;
1202 fm10k_stats_reset(struct rte_eth_dev *dev)
1204 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1205 struct fm10k_hw_stats *hw_stats =
1206 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1208 PMD_INIT_FUNC_TRACE();
1210 memset(hw_stats, 0, sizeof(*hw_stats));
1211 fm10k_rebind_hw_stats(hw, hw_stats);
1215 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1216 struct rte_eth_dev_info *dev_info)
1218 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1220 PMD_INIT_FUNC_TRACE();
1222 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
1223 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
1224 dev_info->max_rx_queues = hw->mac.max_queues;
1225 dev_info->max_tx_queues = hw->mac.max_queues;
1226 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
1227 dev_info->max_hash_mac_addrs = 0;
1228 dev_info->max_vfs = dev->pci_dev->max_vfs;
1229 dev_info->vmdq_pool_base = 0;
1230 dev_info->vmdq_queue_base = 0;
1231 dev_info->max_vmdq_pools = ETH_32_POOLS;
1232 dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
1233 dev_info->rx_offload_capa =
1234 DEV_RX_OFFLOAD_VLAN_STRIP |
1235 DEV_RX_OFFLOAD_IPV4_CKSUM |
1236 DEV_RX_OFFLOAD_UDP_CKSUM |
1237 DEV_RX_OFFLOAD_TCP_CKSUM;
1238 dev_info->tx_offload_capa =
1239 DEV_TX_OFFLOAD_VLAN_INSERT |
1240 DEV_TX_OFFLOAD_IPV4_CKSUM |
1241 DEV_TX_OFFLOAD_UDP_CKSUM |
1242 DEV_TX_OFFLOAD_TCP_CKSUM |
1243 DEV_TX_OFFLOAD_TCP_TSO;
1245 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1246 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1248 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1250 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1251 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1252 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1254 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1258 dev_info->default_txconf = (struct rte_eth_txconf) {
1260 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1261 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1262 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1264 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1265 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1266 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1269 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1270 .nb_max = FM10K_MAX_RX_DESC,
1271 .nb_min = FM10K_MIN_RX_DESC,
1272 .nb_align = FM10K_MULT_RX_DESC,
1275 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1276 .nb_max = FM10K_MAX_TX_DESC,
1277 .nb_min = FM10K_MIN_TX_DESC,
1278 .nb_align = FM10K_MULT_TX_DESC,
1283 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1286 uint16_t mac_num = 0;
1287 uint32_t vid_idx, vid_bit, mac_index;
1288 struct fm10k_hw *hw;
1289 struct fm10k_macvlan_filter_info *macvlan;
1290 struct rte_eth_dev_data *data = dev->data;
1292 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1293 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1295 if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1296 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1300 if (vlan_id > ETH_VLAN_ID_MAX) {
1301 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1305 vid_idx = FM10K_VFTA_IDX(vlan_id);
1306 vid_bit = FM10K_VFTA_BIT(vlan_id);
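	/* Illustrative example (assuming the usual 32-bit VFTA layout, i.e.
	 * FM10K_VFTA_IDX(vid) == vid / 32 and
	 * FM10K_VFTA_BIT(vid) == 1 << (vid % 32)): VLAN 100 maps to vfta
	 * word 3, bit 4, which is what the membership tests below check
	 * before programming the hardware.
	 */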
1307 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1308 if (on && (macvlan->vfta[vid_idx] & vid_bit))
1310 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1311 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1312 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1313 "in the VLAN filter table");
1318 result = fm10k_update_vlan(hw, vlan_id, 0, on);
1319 fm10k_mbx_unlock(hw);
1320 if (result != FM10K_SUCCESS) {
1321 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1325 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1326 (result == FM10K_SUCCESS); mac_index++) {
1327 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1329 if (mac_num > macvlan->mac_num - 1) {
1330 PMD_INIT_LOG(ERR, "MAC address number "
1335 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1336 data->mac_addrs[mac_index].addr_bytes,
1338 fm10k_mbx_unlock(hw);
1341 if (result != FM10K_SUCCESS) {
1342 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1347 macvlan->vlan_num++;
1348 macvlan->vfta[vid_idx] |= vid_bit;
1350 macvlan->vlan_num--;
1351 macvlan->vfta[vid_idx] &= ~vid_bit;
1357 fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
1359 if (mask & ETH_VLAN_STRIP_MASK) {
1360 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1361 PMD_INIT_LOG(ERR, "VLAN stripping is "
1362 "always on in fm10k");
1365 if (mask & ETH_VLAN_EXTEND_MASK) {
1366 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1367 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1368 "supported in fm10k");
1371 if (mask & ETH_VLAN_FILTER_MASK) {
1372 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1373 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1377 /* Add/Remove a MAC address, and update filters to main VSI */
1378 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1379 const u8 *mac, bool add, uint32_t pool)
1381 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1382 struct fm10k_macvlan_filter_info *macvlan;
1385 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1387 if (pool != MAIN_VSI_POOL_NUMBER) {
1388 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1389 "mac to pool %u", pool);
1392 for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1393 if (!macvlan->vfta[j])
1395 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1396 if (!(macvlan->vfta[j] & (1 << k)))
1398 if (i + 1 > macvlan->vlan_num) {
1399 PMD_INIT_LOG(ERR, "VLAN number does not match");
1403 fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1404 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1405 fm10k_mbx_unlock(hw);
1411 /* Add/Remove a MAC address, and update filters to VMDQ */
1412 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1413 const u8 *mac, bool add, uint32_t pool)
1415 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1416 struct fm10k_macvlan_filter_info *macvlan;
1417 struct rte_eth_vmdq_rx_conf *vmdq_conf;
1420 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1421 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1423 if (pool > macvlan->nb_queue_pools) {
1424 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1426 pool, macvlan->nb_queue_pools);
1429 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1430 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1433 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1434 vmdq_conf->pool_map[i].vlan_id, add, 0);
1435 fm10k_mbx_unlock(hw);
1439 /* Add/Remove a MAC address, and update filters */
1440 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1441 const u8 *mac, bool add, uint32_t pool)
1443 struct fm10k_macvlan_filter_info *macvlan;
1445 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1447 if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1448 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1450 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1458 /* Add a MAC address, and update filters */
1460 fm10k_macaddr_add(struct rte_eth_dev *dev,
1461 struct ether_addr *mac_addr,
1465 struct fm10k_macvlan_filter_info *macvlan;
1467 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1468 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1469 macvlan->mac_vmdq_id[index] = pool;
1472 /* Remove a MAC address, and update filters */
1474 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1476 struct rte_eth_dev_data *data = dev->data;
1477 struct fm10k_macvlan_filter_info *macvlan;
1479 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1480 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1481 FALSE, macvlan->mac_vmdq_id[index]);
1482 macvlan->mac_vmdq_id[index] = 0;
1486 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1488 if ((request < min) || (request > max) || ((request % mult) != 0))
1496 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1498 if ((request < min) || (request > max) || ((div % request) != 0))
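/* Illustrative use of the two helpers above: a request of 512 Rx
 * descriptors passes check_nb_desc(min, max, FM10K_MULT_RX_DESC, 512)
 * when 512 is within range and a multiple of the descriptor granularity,
 * while an rx_free_thresh of 32 passes check_thresh() only if it also
 * divides the ring-specific FM10K_RX_FREE_THRESH_DIV(q) value evenly.
 */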
1505 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1507 uint16_t rx_free_thresh;
1509 if (conf->rx_free_thresh == 0)
1510 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1512 rx_free_thresh = conf->rx_free_thresh;
1514 /* make sure the requested threshold satisfies the constraints */
1515 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1516 FM10K_RX_FREE_THRESH_MAX(q),
1517 FM10K_RX_FREE_THRESH_DIV(q),
1519 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1520 "less than or equal to %u, "
1521 "greater than or equal to %u, "
1522 "and a divisor of %u",
1523 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1524 FM10K_RX_FREE_THRESH_MIN(q),
1525 FM10K_RX_FREE_THRESH_DIV(q));
1529 q->alloc_thresh = rx_free_thresh;
1530 q->drop_en = conf->rx_drop_en;
1531 q->rx_deferred_start = conf->rx_deferred_start;
1537 * Hardware requires specific alignment for Rx packet buffers. At
1538 * least one of the following two conditions must be satisfied.
1539 * 1. Address is 512B aligned
1540 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1542 * As such, the driver may need to adjust the DMA address within the
1543 * buffer by up to 512B.
1545 * return 1 if the element size is valid, otherwise return 0.
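/* Illustrative example (assuming a pool whose elt_size leaves 2048B of
 * data room after the mbuf header and headroom): min_size becomes
 * 2048 - 512 = 1536B once the worst-case alignment reserve is subtracted,
 * which passes the overflow check below. A pool leaving less than 512B of
 * data room would wrap around and be rejected by that same check.
 */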
1548 mempool_element_size_valid(struct rte_mempool *mp)
1552 /* elt_size includes mbuf header and headroom */
1553 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1554 RTE_PKTMBUF_HEADROOM;
1556 /* account for up to 512B of alignment */
1557 min_size -= FM10K_RX_DATABUF_ALIGN;
1559 /* sanity check for overflow */
1560 if (min_size > mp->elt_size)
1568 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1569 uint16_t nb_desc, unsigned int socket_id,
1570 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1572 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1573 struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
1574 struct fm10k_rx_queue *q;
1575 const struct rte_memzone *mz;
1577 PMD_INIT_FUNC_TRACE();
1579 /* make sure the mempool element size can account for alignment. */
1580 if (!mempool_element_size_valid(mp)) {
1581 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1585 /* make sure a valid number of descriptors have been requested */
1586 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1587 FM10K_MULT_RX_DESC, nb_desc)) {
1588 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1589 "less than or equal to %"PRIu32", "
1590 "greater than or equal to %u, "
1591 "and a multiple of %u",
1592 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1593 FM10K_MULT_RX_DESC);
1598 * if this queue existed already, free the associated memory. The
1599 * queue cannot be reused in case we need to allocate memory on
1600 * different socket than was previously used.
1602 if (dev->data->rx_queues[queue_id] != NULL) {
1603 rx_queue_free(dev->data->rx_queues[queue_id]);
1604 dev->data->rx_queues[queue_id] = NULL;
1607 /* allocate memory for the queue structure */
1608 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1611 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1617 q->nb_desc = nb_desc;
1618 q->nb_fake_desc = FM10K_MULT_RX_DESC;
1619 q->port_id = dev->data->port_id;
1620 q->queue_id = queue_id;
1621 q->tail_ptr = (volatile uint32_t *)
1622 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1623 if (handle_rxconf(q, conf))
1626 /* allocate memory for the software ring */
1627 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1628 (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1629 RTE_CACHE_LINE_SIZE, socket_id);
1630 if (q->sw_ring == NULL) {
1631 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1637 * allocate memory for the hardware descriptor ring. A memzone large
1638 * enough to hold the maximum ring size is requested to allow for
1639 * resizing in later calls to the queue setup function.
1641 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1642 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1645 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1646 rte_free(q->sw_ring);
1650 q->hw_ring = mz->addr;
1651 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1653 /* Check if the number of descriptors satisfies the vector Rx requirement */
1654 if (!rte_is_power_of_2(nb_desc)) {
1655 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1656 "preconditions - canceling the feature for "
1657 "the whole port[%d]",
1658 q->queue_id, q->port_id);
1659 dev_info->rx_vec_allowed = false;
1661 fm10k_rxq_vec_setup(q);
1663 dev->data->rx_queues[queue_id] = q;
1668 fm10k_rx_queue_release(void *queue)
1670 PMD_INIT_FUNC_TRACE();
1672 rx_queue_free(queue);
1676 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1678 uint16_t tx_free_thresh;
1679 uint16_t tx_rs_thresh;
1681 /* the constraint macros require that tx_free_thresh is configured
1682 * before tx_rs_thresh */
1683 if (conf->tx_free_thresh == 0)
1684 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1686 tx_free_thresh = conf->tx_free_thresh;
1688 /* make sure the requested threshold satisfies the constraints */
1689 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1690 FM10K_TX_FREE_THRESH_MAX(q),
1691 FM10K_TX_FREE_THRESH_DIV(q),
1693 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1694 "less than or equal to %u, "
1695 "greater than or equal to %u, "
1696 "and a divisor of %u",
1697 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1698 FM10K_TX_FREE_THRESH_MIN(q),
1699 FM10K_TX_FREE_THRESH_DIV(q));
1703 q->free_thresh = tx_free_thresh;
1705 if (conf->tx_rs_thresh == 0)
1706 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1708 tx_rs_thresh = conf->tx_rs_thresh;
1710 q->tx_deferred_start = conf->tx_deferred_start;
1712 /* make sure the requested threshold satisfies the constraints */
1713 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1714 FM10K_TX_RS_THRESH_MAX(q),
1715 FM10K_TX_RS_THRESH_DIV(q),
1717 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1718 "less than or equal to %u, "
1719 "greater than or equal to %u, "
1720 "and a divisor of %u",
1721 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1722 FM10K_TX_RS_THRESH_MIN(q),
1723 FM10K_TX_RS_THRESH_DIV(q));
1727 q->rs_thresh = tx_rs_thresh;
1733 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1734 uint16_t nb_desc, unsigned int socket_id,
1735 const struct rte_eth_txconf *conf)
1737 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1738 struct fm10k_tx_queue *q;
1739 const struct rte_memzone *mz;
1741 PMD_INIT_FUNC_TRACE();
1743 /* make sure a valid number of descriptors have been requested */
1744 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1745 FM10K_MULT_TX_DESC, nb_desc)) {
1746 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1747 "less than or equal to %"PRIu32", "
1748 "greater than or equal to %u, "
1749 "and a multiple of %u",
1750 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1751 FM10K_MULT_TX_DESC);
1756 * if this queue existed already, free the associated memory. The
1757 * queue cannot be reused in case we need to allocate memory on
1758 * different socket than was previously used.
1760 if (dev->data->tx_queues[queue_id] != NULL) {
1761 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1764 dev->data->tx_queues[queue_id] = NULL;
1767 /* allocate memory for the queue structure */
1768 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1771 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1776 q->nb_desc = nb_desc;
1777 q->port_id = dev->data->port_id;
1778 q->queue_id = queue_id;
1779 q->txq_flags = conf->txq_flags;
1780 q->ops = &def_txq_ops;
1781 q->tail_ptr = (volatile uint32_t *)
1782 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1783 if (handle_txconf(q, conf))
1786 /* allocate memory for the software ring */
1787 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1788 nb_desc * sizeof(struct rte_mbuf *),
1789 RTE_CACHE_LINE_SIZE, socket_id);
1790 if (q->sw_ring == NULL) {
1791 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1797 * allocate memory for the hardware descriptor ring. A memzone large
1798 * enough to hold the maximum ring size is requested to allow for
1799 * resizing in later calls to the queue setup function.
1801 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
1802 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
1805 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1806 rte_free(q->sw_ring);
1810 q->hw_ring = mz->addr;
1811 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1814 * allocate memory for the RS bit tracker. Enough slots to hold the
1815 * descriptor index for each RS bit needing to be set are required.
1817 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1818 ((nb_desc + 1) / q->rs_thresh) *
1820 RTE_CACHE_LINE_SIZE, socket_id);
1821 if (q->rs_tracker.list == NULL) {
1822 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1823 rte_free(q->sw_ring);
1828 dev->data->tx_queues[queue_id] = q;
1833 fm10k_tx_queue_release(void *queue)
1835 struct fm10k_tx_queue *q = queue;
1836 PMD_INIT_FUNC_TRACE();
1842 fm10k_reta_update(struct rte_eth_dev *dev,
1843 struct rte_eth_rss_reta_entry64 *reta_conf,
1846 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1847 uint16_t i, j, idx, shift;
1851 PMD_INIT_FUNC_TRACE();
1853 if (reta_size > FM10K_MAX_RSS_INDICES) {
1854 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1855 "(%d) doesn't match the number hardware can support "
1856 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1861 * Update Redirection Table RETA[n], n=0..31. The redirection table has
1862 * 128-entries in 32 registers
1864 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1865 idx = i / RTE_RETA_GROUP_SIZE;
1866 shift = i % RTE_RETA_GROUP_SIZE;
1867 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1868 BIT_MASK_PER_UINT32);
1873 if (mask != BIT_MASK_PER_UINT32)
1874 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1876 for (j = 0; j < CHARS_PER_UINT32; j++) {
1877 if (mask & (0x1 << j)) {
1879 reta &= ~(UINT8_MAX << CHAR_BIT * j);
1880 reta |= reta_conf[idx].reta[shift + j] <<
1884 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
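	/* Illustrative partial-update example: to change only RETA entry 2
	 * of the first register, the caller sets bit 2 of reta_conf[0].mask;
	 * the code above then reads the current register value, clears
	 * byte 2 with ~(UINT8_MAX << 16) and ORs in the new queue index at
	 * that byte position before writing the register back.
	 */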
1891 fm10k_reta_query(struct rte_eth_dev *dev,
1892 struct rte_eth_rss_reta_entry64 *reta_conf,
1895 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1896 uint16_t i, j, idx, shift;
1900 PMD_INIT_FUNC_TRACE();
1902 if (reta_size < FM10K_MAX_RSS_INDICES) {
1903 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1904 "(%d) doesn't match the number hardware can support "
1905 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1910 * Read Redirection Table RETA[n], n=0..31. The redirection table has
1911 * 128-entries in 32 registers
1913 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1914 idx = i / RTE_RETA_GROUP_SIZE;
1915 shift = i % RTE_RETA_GROUP_SIZE;
1916 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1917 BIT_MASK_PER_UINT32);
1921 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1922 for (j = 0; j < CHARS_PER_UINT32; j++) {
1923 if (mask & (0x1 << j))
1924 reta_conf[idx].reta[shift + j] = ((reta >>
1925 CHAR_BIT * j) & UINT8_MAX);
1933 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1934 struct rte_eth_rss_conf *rss_conf)
1936 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1937 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1939 uint64_t hf = rss_conf->rss_hf;
1942 PMD_INIT_FUNC_TRACE();
1944 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1945 FM10K_RSSRK_ENTRIES_PER_REG)
1952 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
1953 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
1954 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
1955 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
1956 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
1957 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
1958 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
1959 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
1960 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
1962 /* If the requested hash functions don't map to any supported mode, return */
1967 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1968 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1970 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
1976 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1977 struct rte_eth_rss_conf *rss_conf)
1979 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1980 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1985 PMD_INIT_FUNC_TRACE();
1987 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1988 FM10K_RSSRK_ENTRIES_PER_REG)
1992 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1993 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1995 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
1997 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
1998 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
1999 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
2000 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
2001 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
2002 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
2003 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
2004 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
2005 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
2007 rss_conf->rss_hf = hf;
2013 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2015 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2016 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2018 /* Bind all local non-queue interrupts to vector 0 */
2021 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2022 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2023 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2024 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2025 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2026 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2028 /* Enable misc causes */
2029 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2030 FM10K_EIMR_ENABLE(THI_FAULT) |
2031 FM10K_EIMR_ENABLE(FUM_FAULT) |
2032 FM10K_EIMR_ENABLE(MAILBOX) |
2033 FM10K_EIMR_ENABLE(SWITCHREADY) |
2034 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2035 FM10K_EIMR_ENABLE(SRAMERROR) |
2036 FM10K_EIMR_ENABLE(VFLR));
2039 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2040 FM10K_ITR_MASK_CLEAR);
2041 FM10K_WRITE_FLUSH(hw);
2045 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2047 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2048 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2052 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2053 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2054 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2055 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2056 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2057 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2059 /* Disable misc causes */
2060 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2061 FM10K_EIMR_DISABLE(THI_FAULT) |
2062 FM10K_EIMR_DISABLE(FUM_FAULT) |
2063 FM10K_EIMR_DISABLE(MAILBOX) |
2064 FM10K_EIMR_DISABLE(SWITCHREADY) |
2065 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2066 FM10K_EIMR_DISABLE(SRAMERROR) |
2067 FM10K_EIMR_DISABLE(VFLR));
2070 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2071 FM10K_WRITE_FLUSH(hw);
2075 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2077 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2078 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2080 /* Bind all local non-queue interrupts to vector 0 */
2083 /* Only INT 0 is available; the other 15 are reserved. */
2084 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2087 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2088 FM10K_ITR_MASK_CLEAR);
2089 FM10K_WRITE_FLUSH(hw);
2093 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2095 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2096 uint32_t int_map = FM10K_INT_MAP_DISABLE;
2100 /* Only INT 0 is available; the other 15 are reserved. */
2101 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2104 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2105 FM10K_WRITE_FLUSH(hw);
2109 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2111 struct fm10k_fault fault;
2113 const char *estr = "Unknown error";
2115 /* Process PCA fault */
2116 if (eicr & FM10K_EICR_PCA_FAULT) {
2117 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2120 switch (fault.type) {
2122 estr = "PCA_NO_FAULT"; break;
2123 case PCA_UNMAPPED_ADDR:
2124 estr = "PCA_UNMAPPED_ADDR"; break;
2125 case PCA_BAD_QACCESS_PF:
2126 estr = "PCA_BAD_QACCESS_PF"; break;
2127 case PCA_BAD_QACCESS_VF:
2128 estr = "PCA_BAD_QACCESS_VF"; break;
2129 case PCA_MALICIOUS_REQ:
2130 estr = "PCA_MALICIOUS_REQ"; break;
2131 case PCA_POISONED_TLP:
2132 estr = "PCA_POISONED_TLP"; break;
2134 estr = "PCA_TLP_ABORT"; break;
2138 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2139 estr, fault.func ? "VF" : "PF", fault.func,
2140 fault.address, fault.specinfo);
2143 /* Process THI fault */
2144 if (eicr & FM10K_EICR_THI_FAULT) {
2145 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2148 switch (fault.type) {
2150 estr = "THI_NO_FAULT"; break;
2151 case THI_MAL_DIS_Q_FAULT:
2152 estr = "THI_MAL_DIS_Q_FAULT"; break;
2156 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2157 estr, fault.func ? "VF" : "PF", fault.func,
2158 fault.address, fault.specinfo);
2161 /* Process FUM fault */
2162 if (eicr & FM10K_EICR_FUM_FAULT) {
2163 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2166 switch (fault.type) {
2168 estr = "FUM_NO_FAULT"; break;
2169 case FUM_UNMAPPED_ADDR:
2170 estr = "FUM_UNMAPPED_ADDR"; break;
2171 case FUM_POISONED_TLP:
2172 estr = "FUM_POISONED_TLP"; break;
2173 case FUM_BAD_VF_QACCESS:
2174 estr = "FUM_BAD_VF_QACCESS"; break;
2175 case FUM_ADD_DECODE_ERR:
2176 estr = "FUM_ADD_DECODE_ERR"; break;
2178 estr = "FUM_RO_ERROR"; break;
2179 case FUM_QPRC_CRC_ERROR:
2180 estr = "FUM_QPRC_CRC_ERROR"; break;
2181 case FUM_CSR_TIMEOUT:
2182 estr = "FUM_CSR_TIMEOUT"; break;
2183 case FUM_INVALID_TYPE:
2184 estr = "FUM_INVALID_TYPE"; break;
2185 case FUM_INVALID_LENGTH:
2186 estr = "FUM_INVALID_LENGTH"; break;
2187 case FUM_INVALID_BE:
2188 estr = "FUM_INVALID_BE"; break;
2189 case FUM_INVALID_ALIGN:
2190 estr = "FUM_INVALID_ALIGN"; break;
2194 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2195 estr, fault.func ? "VF" : "PF", fault.func,
2196 fault.address, fault.specinfo);
2201 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2206 * PF interrupt handler triggered by NIC for handling specific interrupt.
2209 * Pointer to interrupt handle.
2211 * The address of parameter (struct rte_eth_dev *) registered before.
2217 fm10k_dev_interrupt_handler_pf(
2218 __rte_unused struct rte_intr_handle *handle,
2221 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2222 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2223 uint32_t cause, status;
2225 if (hw->mac.type != fm10k_mac_pf)
2228 cause = FM10K_READ_REG(hw, FM10K_EICR);
2230 /* Handle PCI fault cases */
2231 if (cause & FM10K_EICR_FAULT_MASK) {
2232 PMD_INIT_LOG(ERR, "INT: fault detected!");
2233 fm10k_dev_handle_fault(hw, cause);
2236 /* Handle switch up/down */
2237 if (cause & FM10K_EICR_SWITCHNOTREADY)
2238 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2240 if (cause & FM10K_EICR_SWITCHREADY)
2241 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2243 /* Handle mailbox message */
2245 hw->mbx.ops.process(hw, &hw->mbx);
2246 fm10k_mbx_unlock(hw);
2248 /* Handle SRAM error */
2249 if (cause & FM10K_EICR_SRAMERROR) {
2250 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2252 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2253 /* Write to clear pending bits */
2254 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2256 /* TODO: print out error message after shared code updates */
2259 /* Clear these 3 events if any are pending */
2260 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2261 FM10K_EICR_SWITCHREADY;
2263 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2265 /* Re-enable interrupt from device side */
2266 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2267 FM10K_ITR_MASK_CLEAR);
2268 /* Re-enable interrupt from host side */
2269 rte_intr_enable(&(dev->pci_dev->intr_handle));
2273 * VF interrupt handler triggered by NIC for handling specific interrupt.
2276 * Pointer to interrupt handle.
2278 * The address of parameter (struct rte_eth_dev *) registered before.
2284 fm10k_dev_interrupt_handler_vf(
2285 __rte_unused struct rte_intr_handle *handle,
2288 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2289 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2291 if (hw->mac.type != fm10k_mac_vf)
2294 /* Handle mailbox message if lock is acquired */
2296 hw->mbx.ops.process(hw, &hw->mbx);
2297 fm10k_mbx_unlock(hw);
2299 /* Re-enable interrupt from device side */
2300 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2301 FM10K_ITR_MASK_CLEAR);
2302 /* Re-enable interrupt from host side */
2303 rte_intr_enable(&(dev->pci_dev->intr_handle));
2306 /* Mailbox message handler in VF */
2307 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2308 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2309 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2310 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2311 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2314 /* Mailbox message handler in PF */
2315 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
2316 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
2317 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
2318 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
2319 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
2320 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
2321 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
2322 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
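/*
 * Set up the mailbox: initialize the mailbox lock, install the PF or VF
 * message handlers defined above, then connect to the switch manager
 * (PF) or to the PF (VF).
 */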
2326 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2330 /* Initialize mailbox lock */
2331 fm10k_mbx_initlock(hw);
2333 /* Replace default message handler with new ones */
2334 if (hw->mac.type == fm10k_mac_pf)
2335 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
2337 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2340 PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
2344 /* Connect to SM for PF device or PF for VF device */
2345 return hw->mbx.ops.connect(hw, &hw->mbx);
2349 fm10k_close_mbx_service(struct fm10k_hw *hw)
2351 /* Disconnect from SM for PF device or PF for VF device */
2352 hw->mbx.ops.disconnect(hw, &hw->mbx);
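/* Generic device operations exported by this PMD to the ethdev layer */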
2355 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2356 .dev_configure = fm10k_dev_configure,
2357 .dev_start = fm10k_dev_start,
2358 .dev_stop = fm10k_dev_stop,
2359 .dev_close = fm10k_dev_close,
2360 .promiscuous_enable = fm10k_dev_promiscuous_enable,
2361 .promiscuous_disable = fm10k_dev_promiscuous_disable,
2362 .allmulticast_enable = fm10k_dev_allmulticast_enable,
2363 .allmulticast_disable = fm10k_dev_allmulticast_disable,
2364 .stats_get = fm10k_stats_get,
2365 .xstats_get = fm10k_xstats_get,
2366 .stats_reset = fm10k_stats_reset,
2367 .xstats_reset = fm10k_stats_reset,
2368 .link_update = fm10k_link_update,
2369 .dev_infos_get = fm10k_dev_infos_get,
2370 .vlan_filter_set = fm10k_vlan_filter_set,
2371 .vlan_offload_set = fm10k_vlan_offload_set,
2372 .mac_addr_add = fm10k_macaddr_add,
2373 .mac_addr_remove = fm10k_macaddr_remove,
2374 .rx_queue_start = fm10k_dev_rx_queue_start,
2375 .rx_queue_stop = fm10k_dev_rx_queue_stop,
2376 .tx_queue_start = fm10k_dev_tx_queue_start,
2377 .tx_queue_stop = fm10k_dev_tx_queue_stop,
2378 .rx_queue_setup = fm10k_rx_queue_setup,
2379 .rx_queue_release = fm10k_rx_queue_release,
2380 .tx_queue_setup = fm10k_tx_queue_setup,
2381 .tx_queue_release = fm10k_tx_queue_release,
2382 .reta_update = fm10k_reta_update,
2383 .reta_query = fm10k_reta_query,
2384 .rss_hash_update = fm10k_rss_hash_update,
2385 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
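/*
 * Select the Tx burst function: the vector path (fm10k_xmit_pkts_vec)
 * is used only when every Tx queue was configured with the simple
 * flags (no multi-segment, no offloads); otherwise fall back to the
 * scalar fm10k_xmit_pkts.
 */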
2388 static void __attribute__((cold))
2389 fm10k_set_tx_function(struct rte_eth_dev *dev)
2391 struct fm10k_tx_queue *txq;
2395 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2396 txq = dev->data->tx_queues[i];
2397 if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) !=
2398 FM10K_SIMPLE_TX_FLAG) {
2405 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2406 txq = dev->data->tx_queues[i];
2407 fm10k_txq_vec_setup(txq);
2409 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2411 dev->tx_pkt_burst = fm10k_xmit_pkts;
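/*
 * Select the Rx burst function: when the vector condition check passes
 * and vector Rx is allowed, use the SSE paths (scattered or not);
 * otherwise fall back to the scalar receive functions. The resulting
 * choice is recorded in rx_using_sse for every Rx queue.
 */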
2414 static void __attribute__((cold))
2415 fm10k_set_rx_function(struct rte_eth_dev *dev)
2417 struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2418 uint16_t i, rx_using_sse;
2420 /* In order to allow Vector Rx there are a few configuration
2421 * conditions to be met.
2423 if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) {
2424 if (dev->data->scattered_rx)
2425 dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2427 dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2428 } else if (dev->data->scattered_rx)
2429 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2432 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2433 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
2435 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2436 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2438 rxq->rx_using_sse = rx_using_sse;
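/*
 * Fill in device parameters that cannot be read from the hardware yet:
 * assumed PCIe bus capabilities (see the note below) and the default
 * that allows vector Rx.
 */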
2443 fm10k_params_init(struct rte_eth_dev *dev)
2445 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2446 struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2448 /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2449 * there is no way to get link status without reading BAR4. Until this
2450 * works, assume we have maximum bandwidth.
2451 * @todo - fix bus info
2453 hw->bus_caps.speed = fm10k_bus_speed_8000;
2454 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2455 hw->bus_caps.payload = fm10k_bus_payload_512;
2456 hw->bus.speed = fm10k_bus_speed_8000;
2457 hw->bus.width = fm10k_bus_width_pcie_x8;
2458 hw->bus.payload = fm10k_bus_payload_256;
2460 info->rx_vec_allowed = true;
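/*
 * Device init: set the ethdev ops and default burst functions, then (in
 * the primary process only) initialize the shared code and hardware,
 * read or generate the MAC address, reset the hardware, set up the
 * mailbox, register the PF/VF interrupt handler, wait for the switch
 * manager (PF only), enable the logical port and install the default
 * MAC filter.
 */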
2464 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2466 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2468 struct fm10k_macvlan_filter_info *macvlan;
2470 PMD_INIT_FUNC_TRACE();
2472 dev->dev_ops = &fm10k_eth_dev_ops;
2473 dev->rx_pkt_burst = &fm10k_recv_pkts;
2474 dev->tx_pkt_burst = &fm10k_xmit_pkts;
2476 /* only initialize in the primary process */
2477 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2480 rte_eth_copy_pci_info(dev, dev->pci_dev);
2482 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2483 memset(macvlan, 0, sizeof(*macvlan));
2484 /* Vendor and Device ID need to be set before init of shared code */
2485 memset(hw, 0, sizeof(*hw));
2486 hw->device_id = dev->pci_dev->id.device_id;
2487 hw->vendor_id = dev->pci_dev->id.vendor_id;
2488 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2489 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2490 hw->revision_id = 0;
2491 hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2492 if (hw->hw_addr == NULL) {
2493 PMD_INIT_LOG(ERR, "Bad mem resource."
2494 " Try to blacklist unused devices.");
2498 /* Store fm10k_adapter pointer */
2499 hw->back = dev->data->dev_private;
2501 /* Initialize the shared code */
2502 diag = fm10k_init_shared_code(hw);
2503 if (diag != FM10K_SUCCESS) {
2504 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2508 /* Initialize parameters */
2509 fm10k_params_init(dev);
2511 /* Initialize the hw */
2512 diag = fm10k_init_hw(hw);
2513 if (diag != FM10K_SUCCESS) {
2514 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2518 /* Initialize MAC address(es) */
2519 dev->data->mac_addrs = rte_zmalloc("fm10k",
2520 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2521 if (dev->data->mac_addrs == NULL) {
2522 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2526 diag = fm10k_read_mac_addr(hw);
2528 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2529 &dev->data->mac_addrs[0]);
2531 if (diag != FM10K_SUCCESS ||
2532 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2534 /* Generate a random addr */
2535 eth_random_addr(hw->mac.addr);
2536 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
2537 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2538 &dev->data->mac_addrs[0]);
2541 /* Reset the hw statistics */
2542 fm10k_stats_reset(dev);
2545 diag = fm10k_reset_hw(hw);
2546 if (diag != FM10K_SUCCESS) {
2547 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2551 /* Setup mailbox service */
2552 diag = fm10k_setup_mbx_service(hw);
2553 if (diag != FM10K_SUCCESS) {
2554 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2558 /* PF and VF have different interrupt handling mechanisms */
2559 if (hw->mac.type == fm10k_mac_pf) {
2560 /* register callback func to eal lib */
2561 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2562 fm10k_dev_interrupt_handler_pf, (void *)dev);
2564 /* enable MISC interrupt */
2565 fm10k_dev_enable_intr_pf(dev);
2567 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2568 fm10k_dev_interrupt_handler_vf, (void *)dev);
2570 fm10k_dev_enable_intr_vf(dev);
2573 /* Enable uio intr after callback registered */
2574 rte_intr_enable(&(dev->pci_dev->intr_handle));
2576 hw->mac.ops.update_int_moderator(hw);
2578 /* Make sure Switch Manager is ready before going forward. */
2579 if (hw->mac.type == fm10k_mac_pf) {
2580 int switch_ready = 0;
2583 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2585 hw->mac.ops.get_host_state(hw, &switch_ready);
2586 fm10k_mbx_unlock(hw);
2589 /* Delay some time to acquire async LPORT_MAP info. */
2590 rte_delay_us(WAIT_SWITCH_MSG_US);
2593 if (switch_ready == 0) {
2594 PMD_INIT_LOG(ERR, "switch is not ready");
2600 * The calls below trigger mailbox operations, so take the mailbox lock
2601 * to avoid racing with the interrupt handler. Operations on the mailbox
2602 * FIFO raise an interrupt to the PF/SM, whose interrupt handler will
2603 * process it and generate an interrupt back to our side, which in turn
2604 * touches the mailbox FIFO.
2607 /* Enable port first */
2608 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2610 /* Set unicast mode by default; the application can switch to another mode later. */
2613 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2614 FM10K_XCAST_MODE_NONE);
2616 fm10k_mbx_unlock(hw);
2618 /* Add default MAC address */
2619 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2620 MAIN_VSI_POOL_NUMBER);
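/*
 * Device uninit: undo eth_fm10k_dev_init in the primary process: close
 * the device, disable and unregister the PF/VF interrupt handler, free
 * the MAC address array and clear the hardware structure.
 */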
2626 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
2628 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2630 PMD_INIT_FUNC_TRACE();
2632 /* only uninitialize in the primary process */
2633 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2636 /* safe to close dev here */
2637 fm10k_dev_close(dev);
2639 dev->dev_ops = NULL;
2640 dev->rx_pkt_burst = NULL;
2641 dev->tx_pkt_burst = NULL;
2643 /* disable uio/vfio intr */
2644 rte_intr_disable(&(dev->pci_dev->intr_handle));
2646 /* PF and VF have different interrupt handling mechanisms */
2647 if (hw->mac.type == fm10k_mac_pf) {
2648 /* disable interrupt */
2649 fm10k_dev_disable_intr_pf(dev);
2651 /* unregister callback func from eal lib */
2652 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2653 fm10k_dev_interrupt_handler_pf, (void *)dev);
2655 /* disable interrupt */
2656 fm10k_dev_disable_intr_vf(dev);
2658 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2659 fm10k_dev_interrupt_handler_vf, (void *)dev);
2662 /* free mac memory */
2663 if (dev->data->mac_addrs) {
2664 rte_free(dev->data->mac_addrs);
2665 dev->data->mac_addrs = NULL;
2668 memset(hw, 0, sizeof(*hw));
2674 * The set of PCI devices this driver supports. This driver will enable both PF
2675 * and SRIOV-VF devices.
2677 static const struct rte_pci_id pci_id_fm10k_map[] = {
2678 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2679 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2680 #include "rte_pci_dev_ids.h"
2681 { .vendor_id = 0, /* sentinel */ },
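/*
 * PCI driver glue: the device needs BAR mapping and supports detach;
 * per-port private data is sized for struct fm10k_adapter.
 */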
2684 static struct eth_driver rte_pmd_fm10k = {
2686 .name = "rte_pmd_fm10k",
2687 .id_table = pci_id_fm10k_map,
2688 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
2690 .eth_dev_init = eth_fm10k_dev_init,
2691 .eth_dev_uninit = eth_fm10k_dev_uninit,
2692 .dev_private_size = sizeof(struct fm10k_adapter),
2696 * Driver initialization routine.
2697 * Invoked once at EAL init time.
2698 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
2701 rte_pmd_fm10k_init(__rte_unused const char *name,
2702 __rte_unused const char *params)
2704 PMD_INIT_FUNC_TRACE();
2705 rte_eth_driver_register(&rte_pmd_fm10k);
2709 static struct rte_driver rte_fm10k_driver = {
2711 .init = rte_pmd_fm10k_init,
2714 PMD_REGISTER_DRIVER(rte_fm10k_driver);