/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Max number of retries to acquire switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US    100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
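
/*
 * Worked example for the two macros above: sizeof(uint32_t) is 4, so
 * CHARS_PER_UINT32 is 4 and BIT_MASK_PER_UINT32 is (1 << 4) - 1 = 0xF,
 * i.e. one mask bit for each of the four 8-bit RETA entries packed into
 * a single 32-bit register.
 */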
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
        const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
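
/*
 * Typical usage of the helpers above around a mailbox operation, as seen
 * throughout this file:
 *
 *        fm10k_mbx_lock(hw);
 *        hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
 *                FM10K_XCAST_MODE_NONE);
 *        fm10k_mbx_unlock(hw);
 *
 * The trylock/delay loop in fm10k_mbx_lock() spins until the interrupt
 * handler (which also takes this lock) releases the mailbox.
 */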
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 *
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        uint64_t dma_addr;
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                        q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        q->next_trigger = q->alloc_thresh - 1;
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        q->rxrearm_start = 0;

        return 0;
}
/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;

        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* vPMD driver has a different way of releasing mbufs. */
        if (q->rx_using_sse) {
                fm10k_rx_queue_release_mbufs_vec(q);
                return;
        }

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}
/*
 * free all queue memory used when releasing the queue (i.e. when the queue
 * is re-configured or the device is closed)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
        }
}
/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}
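
/*
 * Example of the rs_tracker sizing above: with nb_desc = 512 and
 * rs_thresh = 32, fifo_reset() keeps (512 + 1) / 32 = 16 slots, one for
 * each descriptor index that may carry an RS bit (see the allocation in
 * fm10k_tx_queue_setup() below).
 */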
/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;

        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}
/*
 * free all queue memory used when releasing the queue (i.e. when the queue
 * is re-configured or the device is closed)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
        }
}
/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}
static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
                PMD_INIT_LOG(ERR, "DCB mode is not supported.");
                return -EINVAL;
        }

        if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                return 0;

        if (hw->mac.type == fm10k_mac_vf) {
                PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
                return -EINVAL;
        }

        /* Check VMDQ queue pool number */
        if (vmdq_conf->nb_queue_pools >
                        sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
                        vmdq_conf->nb_queue_pools > nb_rx_q) {
                PMD_INIT_LOG(ERR, "Too many queue pools: %d",
                        vmdq_conf->nb_queue_pools);
                return -EINVAL;
        }

        return 0;
}
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
        /* multiple queue mode checking */
        ret = fm10k_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
                        ret);
                return ret;
        }

        return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
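
/*
 * Examples: fls(0) = 0, fls(1) = 1, fls(8) = 4, fls(15) = 4. The macro is
 * used below to derive the bit widths (ceil(log2)) that are programmed
 * into the DGLORT decoder.
 */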
static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!vmdq_conf->pool_map[i].pools)
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
                fm10k_mbx_unlock(hw);
        }
}
static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Add default mac address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);
}
static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev->data->nb_rx_queues == 1 ||
            dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
            dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
                return;

        /* RSS key is rss_intel_key (default) or user-provided (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
        reta = 0;
        for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }
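
        /*
         * Example of the packing above: with 4 Rx queues, the first
         * register collects queue indices 0..3, i.e. reta = 0x00010203
         * before rte_bswap32() lays the bytes out in the NIC's
         * little-endian order.
         */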
        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
                        "supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}
static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev,
        uint16_t nb_lport_old, uint16_t nb_lport_new)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i;

        fm10k_mbx_lock(hw);
        /* Disable previous logic ports */
        if (nb_lport_old)
                hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                        nb_lport_old, false);
        /* Enable new logic ports */
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                nb_lport_new, true);
        fm10k_mbx_unlock(hw);

        for (i = 0; i < nb_lport_new; i++) {
                /* Set unicast mode by default. The application can switch
                 * to another mode via other API functions.
                 */
                fm10k_mbx_lock(hw);
                hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
                        FM10K_XCAST_MODE_NONE);
                fm10k_mbx_unlock(hw);
        }
}
static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct fm10k_macvlan_filter_info *macvlan;
        uint16_t nb_queue_pools = 0; /* pool number in configuration */
        uint16_t nb_lport_new, nb_lport_old;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        fm10k_dev_rss_configure(dev);

        /* only PF supports VMDQ */
        if (hw->mac.type != fm10k_mac_pf)
                return;

        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                nb_queue_pools = vmdq_conf->nb_queue_pools;

        /* no pool number change, no need to update logic port and VLAN/MAC */
        if (macvlan->nb_queue_pools == nb_queue_pools)
                return;

        nb_lport_old = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
        nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
        fm10k_dev_logic_port_update(dev, nb_lport_old, nb_lport_new);

        /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
        memset(dev->data->mac_addrs, 0,
                ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                &dev->data->mac_addrs[0]);
        memset(macvlan, 0, sizeof(*macvlan));
        macvlan->nb_queue_pools = nb_queue_pools;

        if (nb_queue_pools)
                fm10k_dev_vmdq_rx_configure(dev);
        else
                fm10k_dev_pf_main_vsi_reset(dev);
}
static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret = 0;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
        }

        return 0;
}
static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret = 0;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint16_t buf_size;

        /* Disable RXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                                3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
                /* Configure the Rx buffer size for one buff without split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As RX buffer is aligned to 512B within mbuf, some bytes are
                 * reserved for this purpose, and the worst case could be 511B.
                 * But SRR reg assumes all buffers have the same size. In order
                 * to fill the gap, we'll have to consider the worst case and
                 * assume 512B is reserved. If we don't do so, it's possible
                 * for HW to overwrite data to next mbuf.
                 */
                buf_size -= FM10K_RX_DATABUF_ALIGN;

                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
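
                /*
                 * Worked example for the sizing above, assuming the common
                 * mbuf layout (2048B data room, 128B RTE_PKTMBUF_HEADROOM):
                 * buf_size starts at 1920B, drops to 1408B after reserving
                 * the 512B alignment worst case, and SRRCTL is then
                 * programmed in FM10K_SRRCTL_BSIZEPKT_SHIFT-sized units.
                 */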
                /* Add dual VLAN length to support dual VLAN */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                                dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }
        /* Configure VMDQ/RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);

        /* Decide the best RX function */
        fm10k_set_rx_function(dev);

        return 0;
}
static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
        }

        return err;
}
static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
        }

        return 0;
}
static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_reset(dev->data->tx_queues[tx_queue_id]);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
        } else
                err = -1;

        return err;
}
static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
        }

        return 0;
}
static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE);
}
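
/*
 * hw->mac.dglort_map stays at FM10K_DGLORTMAP_NONE until the switch
 * manager assigns this port a glort range, so the callers below bail out
 * early while the helper above reports the map as not yet valid.
 */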
static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if a valid glort range hasn't been acquired yet */
        if (!fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}
static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if a valid glort range hasn't been acquired yet */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}
static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if a valid glort range hasn't been acquired yet */
        if (!fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, it doesn't make sense to enable
         * allmulticast and disable promiscuous since fm10k can only select
         * one of the modes at a time.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
                        "no need to enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}
static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if a valid glort range hasn't been acquired yet */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}
static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t dglortdec, pool_len, rss_len, i;
        uint16_t nb_queue_pools;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_queue_pools = macvlan->nb_queue_pools;
        pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
        rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
        dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
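
        /*
         * Worked example: with 8 VMDQ pools over 32 Rx queues, pool_len =
         * fls(7) = 3 and rss_len = fls(31) - 3 = 2, i.e. 2^3 pools of 2^2
         * RSS queues each, which dglortdec encodes for the DGLORT decoder.
         */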
        /* Establish only MAP 0 as valid */
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);

        /* Configure VMDQ/RSS DGlort Decoder */
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

        /* Invalidate all other GLORT entries */
        for (i = 1; i < FM10K_DGLORT_COUNT; i++)
                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
                                FM10K_DGLORTMAP_NONE);
}
#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)

static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        /* stop, init, then start the hw */
        diag = fm10k_stop_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_start_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_dev_tx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
                return diag;
        }

        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
                return diag;
        }

        if (hw->mac.type == fm10k_mac_pf)
                fm10k_dev_dglort_map_configure(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
                rxq = dev->data->rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
                diag = fm10k_dev_rx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
                txq = dev->data->tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                tx_queue_clean(dev->data->tx_queues[j]);
                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        /* Update default vlan when not in VMDQ mode */
        if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

        return 0;
}
static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        fm10k_dev_tx_queue_stop(dev, i);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_dev_rx_queue_stop(dev, i);
}
static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues) {
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        fm10k_tx_queue_release(dev->data->tx_queues[i]);
        }

        if (dev->data->rx_queues) {
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_rx_queue_release(dev->data->rx_queues[i]);
        }
}
static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t nb_lport;
        struct fm10k_macvlan_filter_info *macvlan;

        PMD_INIT_FUNC_TRACE();

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_lport = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
        fm10k_mbx_lock(hw);
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                nb_lport, false);
        fm10k_mbx_unlock(hw);

        /* Stop mailbox service first */
        fm10k_close_mbx_service(hw);
        fm10k_dev_stop(dev);
        fm10k_dev_queue_release(dev);
}
static int
fm10k_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
{
        PMD_INIT_FUNC_TRACE();

        /* The host-interface link is always up. The speed is ~50Gbps per Gen3
         * x8 PCIe interface. For now, we leave the speed undefined since there
         * is no 50Gbps Ethernet.
         */
        dev->data->dev_link.link_speed = 0;
        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        dev->data->dev_link.link_status = 1;

        return 0;
}
static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        uint64_t ipackets, opackets, ibytes, obytes;
        struct fm10k_hw *hw =
                FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int i;

        PMD_INIT_FUNC_TRACE();

        fm10k_update_hw_stats(hw, hw_stats);

        ipackets = opackets = ibytes = obytes = 0;
        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
                (i < hw->mac.max_queues); ++i) {
                stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
                stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
                stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
                stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
                ipackets += stats->q_ipackets[i];
                opackets += stats->q_opackets[i];
                ibytes += stats->q_ibytes[i];
                obytes += stats->q_obytes[i];
        }
        stats->ipackets = ipackets;
        stats->opackets = opackets;
        stats->ibytes = ibytes;
        stats->obytes = obytes;
}
static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        memset(hw_stats, 0, sizeof(*hw_stats));
        fm10k_rebind_hw_stats(hw, hw_stats);
}
static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
        struct rte_eth_dev_info *dev_info)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
        dev_info->max_rx_queues = hw->mac.max_queues;
        dev_info->max_tx_queues = hw->mac.max_queues;
        dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs = dev->pci_dev->max_vfs;
        dev_info->vmdq_pool_base = 0;
        dev_info->vmdq_queue_base = 0;
        dev_info->max_vmdq_pools = ETH_32_POOLS;
        dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO;

        dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
        dev_info->reta_size = FM10K_MAX_RSS_INDICES;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = FM10K_DEFAULT_RX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_RX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = FM10K_DEFAULT_TX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_TX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
                .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = FM10K_MAX_RX_DESC,
                .nb_min = FM10K_MIN_RX_DESC,
                .nb_align = FM10K_MULT_RX_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = FM10K_MAX_TX_DESC,
                .nb_min = FM10K_MIN_TX_DESC,
                .nb_align = FM10K_MULT_TX_DESC,
        };
}
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        s32 result;
        uint16_t mac_num = 0;
        uint32_t vid_idx, vid_bit, mac_index;
        struct fm10k_hw *hw;
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_eth_dev_data *data = dev->data;

        hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
                PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
                return -EINVAL;
        }

        if (vlan_id > ETH_VLAN_ID_MAX) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
                return -EINVAL;
        }

        vid_idx = FM10K_VFTA_IDX(vlan_id);
        vid_bit = FM10K_VFTA_BIT(vlan_id);
        /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
        if (on && (macvlan->vfta[vid_idx] & vid_bit))
                return 0;
        /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
        if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
                        "in the VLAN filter table");
                return -EINVAL;
        }

        fm10k_mbx_lock(hw);
        result = fm10k_update_vlan(hw, vlan_id, 0, on);
        fm10k_mbx_unlock(hw);
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
                return -EIO;
        }

        for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
                        (result == FM10K_SUCCESS); mac_index++) {
                if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
                        continue;
                if (mac_num > macvlan->mac_num - 1) {
                        PMD_INIT_LOG(ERR, "MAC address number "
                                "does not match");
                        break;
                }
                fm10k_mbx_lock(hw);
                result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
                        data->mac_addrs[mac_index].addr_bytes,
                        vlan_id, on, 0);
                fm10k_mbx_unlock(hw);
                mac_num++;
        }
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
                return -EIO;
        }

        if (on) {
                macvlan->vlan_num++;
                macvlan->vfta[vid_idx] |= vid_bit;
        } else {
                macvlan->vlan_num--;
                macvlan->vfta[vid_idx] &= ~vid_bit;
        }
        return 0;
}
static void
fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
{
        if (mask & ETH_VLAN_STRIP_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
                        PMD_INIT_LOG(ERR, "VLAN stripping is "
                                "always on in fm10k");
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        PMD_INIT_LOG(ERR, "VLAN QinQ is not "
                                "supported in fm10k");
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
                        PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
        }
}
/* Add/Remove a MAC address, and update filters to main VSI */
static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
                const u8 *mac, bool add, uint32_t pool)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        uint32_t i, j, k;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (pool != MAIN_VSI_POOL_NUMBER) {
                PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
                        "mac to pool %u", pool);
                return;
        }
        for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
                if (!macvlan->vfta[j])
                        continue;
                for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
                        if (!(macvlan->vfta[j] & (1 << k)))
                                continue;
                        if (i + 1 > macvlan->vlan_num) {
                                PMD_INIT_LOG(ERR, "vlan number does not match");
                                return;
                        }
                        fm10k_mbx_lock(hw);
                        fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
                                j * FM10K_UINT32_BIT_SIZE + k, add, 0);
                        fm10k_mbx_unlock(hw);
                        i++;
                }
        }
}
/* Add/Remove a MAC address, and update filters to VMDQ */
static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
                const u8 *mac, bool add, uint32_t pool)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (pool > macvlan->nb_queue_pools) {
                PMD_DRV_LOG(ERR, "Pool number %u invalid."
                        " Max pool is %u",
                        pool, macvlan->nb_queue_pools);
                return;
        }
        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
                        vmdq_conf->pool_map[i].vlan_id, add, 0);
                fm10k_mbx_unlock(hw);
        }
}
/* Add/Remove a MAC address, and update filters */
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
                const u8 *mac, bool add, uint32_t pool)
{
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
                fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
        else
                fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);

        if (add)
                macvlan->mac_num++;
        else
                macvlan->mac_num--;
}
/* Add a MAC address, and update filters */
static void
fm10k_macaddr_add(struct rte_eth_dev *dev,
        struct ether_addr *mac_addr,
        uint32_t index,
        uint32_t pool)
{
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
        macvlan->mac_vmdq_id[index] = pool;
}
/* Remove a MAC address, and update filters */
static void
fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct rte_eth_dev_data *data = dev->data;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
                FALSE, macvlan->mac_vmdq_id[index]);
        macvlan->mac_vmdq_id[index] = 0;
}
static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
        if ((request < min) || (request > max) || ((request % mult) != 0))
                return -1;
        else
                return 0;
}

/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
        uint8_t port_id, uint16_t queue_id, int socket_id,
        uint32_t size, uint32_t align)
{
        char name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

        snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
                 driver_name, ring_name, port_id, queue_id, socket_id);

        /* return the memzone if it already exists */
        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
        return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
                                           RTE_PGSIZE_2M);
#else
        return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
        if ((request < min) || (request > max) || ((div % request) != 0))
                return -1;
        else
                return 0;
}
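
/*
 * Example: check_thresh(1, 512, 512, 32) accepts the request since
 * 512 % 32 == 0, while check_thresh(1, 512, 512, 33) rejects it because
 * 33 is not a divisor of 512.
 */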
static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
        uint16_t rx_free_thresh;

        if (conf->rx_free_thresh == 0)
                rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
        else
                rx_free_thresh = conf->rx_free_thresh;

        /* make sure the requested threshold satisfies the constraints */
        if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
                        FM10K_RX_FREE_THRESH_MAX(q),
                        FM10K_RX_FREE_THRESH_DIV(q),
                        rx_free_thresh)) {
                PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
                        "less than or equal to %u, "
                        "greater than or equal to %u, "
                        "and a divisor of %u",
                        rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
                        FM10K_RX_FREE_THRESH_MIN(q),
                        FM10K_RX_FREE_THRESH_DIV(q));
                return -EINVAL;
        }

        q->alloc_thresh = rx_free_thresh;
        q->drop_en = conf->rx_drop_en;
        q->rx_deferred_start = conf->rx_deferred_start;

        return 0;
}
/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
        uint32_t min_size;

        /* elt_size includes mbuf header and headroom */
        min_size = mp->elt_size - sizeof(struct rte_mbuf) -
                        RTE_PKTMBUF_HEADROOM;

        /* account for up to 512B of alignment */
        min_size -= FM10K_RX_DATABUF_ALIGN;

        /* sanity check for overflow */
        if (min_size > mp->elt_size)
                return 0;

        /* size is valid */
        return 1;
}
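
/*
 * Worked example for the check above, using hypothetical but typical
 * numbers: an elt_size of 2176B minus a 128B mbuf header and 128B of
 * headroom leaves 1920B, and 1408B after the 512B alignment reserve.
 * Because min_size is unsigned, a far-too-small element wraps around and
 * exceeds elt_size, which is exactly what the overflow check detects.
 */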
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
        uint16_t nb_desc, unsigned int socket_id,
        const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
        struct fm10k_rx_queue *q;
        const struct rte_memzone *mz;

        PMD_INIT_FUNC_TRACE();

        /* make sure the mempool element size can account for alignment. */
        if (!mempool_element_size_valid(mp)) {
                PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
                return -EINVAL;
        }

        /* make sure a valid number of descriptors have been requested */
        if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
                                FM10K_MULT_RX_DESC, nb_desc)) {
                PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
                        "less than or equal to %"PRIu32", "
                        "greater than or equal to %u, "
                        "and a multiple of %u",
                        nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
                        FM10K_MULT_RX_DESC);
                return -EINVAL;
        }

        /*
         * if this queue existed already, free the associated memory. The
         * queue cannot be reused in case we need to allocate memory on
         * different socket than was previously used.
         */
        if (dev->data->rx_queues[queue_id] != NULL) {
                rx_queue_free(dev->data->rx_queues[queue_id]);
                dev->data->rx_queues[queue_id] = NULL;
        }

        /* allocate memory for the queue structure */
        q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
                                socket_id);
        if (q == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
                return -ENOMEM;
        }

        /* setup queue */
        q->mp = mp;
        q->nb_desc = nb_desc;
        q->port_id = dev->data->port_id;
        q->queue_id = queue_id;
        q->tail_ptr = (volatile uint32_t *)
                &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
        if (handle_rxconf(q, conf))
                return -EINVAL;

        /* allocate memory for the software ring */
        q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
                        nb_desc * sizeof(struct rte_mbuf *),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (q->sw_ring == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate software ring");
                rte_free(q);
                return -ENOMEM;
        }

        /*
         * allocate memory for the hardware descriptor ring. A memzone large
         * enough to hold the maximum ring size is requested to allow for
         * resizing in later calls to the queue setup function.
         */
        mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
                                dev->data->port_id, queue_id, socket_id,
                                FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
                rte_free(q->sw_ring);
                rte_free(q);
                return -ENOMEM;
        }
        q->hw_ring = mz->addr;
#ifdef RTE_LIBRTE_XEN_DOM0
        q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
#else
        q->hw_ring_phys_addr = mz->phys_addr;
#endif

        /* Check whether the number of descriptors satisfies the vector Rx
         * requirement */
        if (!rte_is_power_of_2(nb_desc)) {
                PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
                        "preconditions - canceling the feature for "
                        "the whole port[%d]",
                        q->queue_id, q->port_id);
                dev_info->rx_vec_allowed = false;
        } else
                fm10k_rxq_vec_setup(q);

        dev->data->rx_queues[queue_id] = q;
        return 0;
}
static void
fm10k_rx_queue_release(void *queue)
{
        PMD_INIT_FUNC_TRACE();

        rx_queue_free(queue);
}
static inline int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
        uint16_t tx_free_thresh;
        uint16_t tx_rs_thresh;

        /* The constraint macros require that tx_free_thresh be configured
         * before tx_rs_thresh */
        if (conf->tx_free_thresh == 0)
                tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
        else
                tx_free_thresh = conf->tx_free_thresh;

        /* make sure the requested threshold satisfies the constraints */
        if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
                        FM10K_TX_FREE_THRESH_MAX(q),
                        FM10K_TX_FREE_THRESH_DIV(q),
                        tx_free_thresh)) {
                PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
                        "less than or equal to %u, "
                        "greater than or equal to %u, "
                        "and a divisor of %u",
                        tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
                        FM10K_TX_FREE_THRESH_MIN(q),
                        FM10K_TX_FREE_THRESH_DIV(q));
                return -EINVAL;
        }

        q->free_thresh = tx_free_thresh;

        if (conf->tx_rs_thresh == 0)
                tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
        else
                tx_rs_thresh = conf->tx_rs_thresh;

        q->tx_deferred_start = conf->tx_deferred_start;

        /* make sure the requested threshold satisfies the constraints */
        if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
                        FM10K_TX_RS_THRESH_MAX(q),
                        FM10K_TX_RS_THRESH_DIV(q),
                        tx_rs_thresh)) {
                PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
                        "less than or equal to %u, "
                        "greater than or equal to %u, "
                        "and a divisor of %u",
                        tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
                        FM10K_TX_RS_THRESH_MIN(q),
                        FM10K_TX_RS_THRESH_DIV(q));
                return -EINVAL;
        }

        q->rs_thresh = tx_rs_thresh;

        return 0;
}
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
        uint16_t nb_desc, unsigned int socket_id,
        const struct rte_eth_txconf *conf)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_tx_queue *q;
        const struct rte_memzone *mz;

        PMD_INIT_FUNC_TRACE();

        /* make sure a valid number of descriptors have been requested */
        if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
                                FM10K_MULT_TX_DESC, nb_desc)) {
                PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
                        "less than or equal to %"PRIu32", "
                        "greater than or equal to %u, "
                        "and a multiple of %u",
                        nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
                        FM10K_MULT_TX_DESC);
                return -EINVAL;
        }

        /*
         * if this queue existed already, free the associated memory. The
         * queue cannot be reused in case we need to allocate memory on
         * different socket than was previously used.
         */
        if (dev->data->tx_queues[queue_id] != NULL) {
                tx_queue_free(dev->data->tx_queues[queue_id]);
                dev->data->tx_queues[queue_id] = NULL;
        }

        /* allocate memory for the queue structure */
        q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
                                socket_id);
        if (q == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
                return -ENOMEM;
        }

        /* setup queue */
        q->nb_desc = nb_desc;
        q->port_id = dev->data->port_id;
        q->queue_id = queue_id;
        q->tail_ptr = (volatile uint32_t *)
                &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
        if (handle_txconf(q, conf))
                return -EINVAL;

        /* allocate memory for the software ring */
        q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
                        nb_desc * sizeof(struct rte_mbuf *),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (q->sw_ring == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate software ring");
                rte_free(q);
                return -ENOMEM;
        }

        /*
         * allocate memory for the hardware descriptor ring. A memzone large
         * enough to hold the maximum ring size is requested to allow for
         * resizing in later calls to the queue setup function.
         */
        mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
                                dev->data->port_id, queue_id, socket_id,
                                FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
                rte_free(q->sw_ring);
                rte_free(q);
                return -ENOMEM;
        }
        q->hw_ring = mz->addr;
#ifdef RTE_LIBRTE_XEN_DOM0
        q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
#else
        q->hw_ring_phys_addr = mz->phys_addr;
#endif

        /*
         * allocate memory for the RS bit tracker. Enough slots to hold the
         * descriptor index for each RS bit needing to be set are required.
         */
        q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
                                ((nb_desc + 1) / q->rs_thresh) *
                                sizeof(uint16_t),
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (q->rs_tracker.list == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
                rte_free(q->sw_ring);
                rte_free(q);
                return -ENOMEM;
        }

        dev->data->tx_queues[queue_id] = q;
        return 0;
}
static void
fm10k_tx_queue_release(void *queue)
{
        PMD_INIT_FUNC_TRACE();

        tx_queue_free(queue);
}
static int
fm10k_reta_update(struct rte_eth_dev *dev,
        struct rte_eth_rss_reta_entry64 *reta_conf,
        uint16_t reta_size)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t i, j, idx, shift;
        uint8_t mask;
        uint32_t reta;

        PMD_INIT_FUNC_TRACE();

        if (reta_size > FM10K_MAX_RSS_INDICES) {
                PMD_INIT_LOG(ERR, "The size of the hash lookup table "
                        "configured (%d) exceeds what the hardware can "
                        "support (%d)", reta_size, FM10K_MAX_RSS_INDICES);
                return -EINVAL;
        }

        /*
         * Update Redirection Table RETA[n], n=0..31. The redirection table
         * has 128 entries in 32 registers.
         */
        for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) &
                                BIT_MASK_PER_UINT32);
                if (mask == 0)
                        continue;

                reta = 0;
                if (mask != BIT_MASK_PER_UINT32)
                        reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
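
                /*
                 * Read-modify-write example: a mask of 0x3 means only the
                 * two low RETA bytes in this register are being updated,
                 * so the register is read back first and the untouched
                 * bytes are preserved by the loop below.
                 */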
                for (j = 0; j < CHARS_PER_UINT32; j++) {
                        if (mask & (0x1 << j)) {
                                if (mask != 0xF)
                                        reta &= ~(UINT8_MAX << CHAR_BIT * j);
                                reta |= reta_conf[idx].reta[shift + j] <<
                                                (CHAR_BIT * j);
                        }
                }
                FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
        }

        return 0;
}
static int
fm10k_reta_query(struct rte_eth_dev *dev,
        struct rte_eth_rss_reta_entry64 *reta_conf,
        uint16_t reta_size)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t i, j, idx, shift;
        uint8_t mask;
        uint32_t reta;

        PMD_INIT_FUNC_TRACE();

        if (reta_size < FM10K_MAX_RSS_INDICES) {
                PMD_INIT_LOG(ERR, "The size of the hash lookup table "
                        "configured (%d) is smaller than what the hardware "
                        "supports (%d)", reta_size, FM10K_MAX_RSS_INDICES);
                return -EINVAL;
        }

        /*
         * Read Redirection Table RETA[n], n=0..31. The redirection table
         * has 128 entries in 32 registers.
         */
        for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) &
                                BIT_MASK_PER_UINT32);
                if (mask == 0)
                        continue;

                reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
                for (j = 0; j < CHARS_PER_UINT32; j++) {
                        if (mask & (0x1 << j))
                                reta_conf[idx].reta[shift + j] = ((reta >>
                                        CHAR_BIT * j) & UINT8_MAX);
                }
        }

        return 0;
}
static int
fm10k_rss_hash_update(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint32_t mrqc;
        uint64_t hf = rss_conf->rss_hf;
        int i;

        PMD_INIT_FUNC_TRACE();

        if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
                FM10K_RSSRK_ENTRIES_PER_REG)
                return -EINVAL;

        if (hf == 0)
                return -EINVAL;

        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;

        /* If the flags don't map to any supported hash function, return */
        if (mrqc == 0)
                return -EINVAL;

        for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);

        return 0;
}
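
/*
 * Note on the key length check above: FM10K_RSSRK_SIZE registers of four
 * bytes each work out to the same 40-byte key length as the RSS_KEY_SIZE
 * default key defined earlier in this file.
 */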
static int
fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint32_t mrqc;
        uint64_t hf;
        int i;

        PMD_INIT_FUNC_TRACE();

        if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
                FM10K_RSSRK_ENTRIES_PER_REG)
                return -EINVAL;

        if (key != NULL)
                for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
                        key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));

        mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
        hf = 0;
        hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
        hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
        hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
        hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
        hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
        hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
        hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
        hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
        hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;

        rss_conf->rss_hf = hf;

        return 0;
}
static void
fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

        /* Bind all local non-queue interrupt to vector 0 */
        int_map |= 0;

        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);

        /* Enable misc causes */
        FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
                                FM10K_EIMR_ENABLE(THI_FAULT) |
                                FM10K_EIMR_ENABLE(FUM_FAULT) |
                                FM10K_EIMR_ENABLE(MAILBOX) |
                                FM10K_EIMR_ENABLE(SWITCHREADY) |
                                FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
                                FM10K_EIMR_ENABLE(SRAMERROR) |
                                FM10K_EIMR_ENABLE(VFLR));

        /* Enable ITR 0 */
        FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
                                FM10K_ITR_MASK_CLEAR);
        FM10K_WRITE_FLUSH(hw);
}
static void
fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t int_map = FM10K_INT_MAP_DISABLE;

        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);

        /* Disable misc causes */
        FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
                                FM10K_EIMR_DISABLE(THI_FAULT) |
                                FM10K_EIMR_DISABLE(FUM_FAULT) |
                                FM10K_EIMR_DISABLE(MAILBOX) |
                                FM10K_EIMR_DISABLE(SWITCHREADY) |
                                FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
                                FM10K_EIMR_DISABLE(SRAMERROR) |
                                FM10K_EIMR_DISABLE(VFLR));

        /* Disable ITR 0 */
        FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
        FM10K_WRITE_FLUSH(hw);
}
static void
fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

        /* Bind all local non-queue interrupt to vector 0 */
        int_map |= 0;

        /* Only INT 0 available, the other 15 are reserved. */
        FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);

        /* Enable ITR 0 */
        FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
                                FM10K_ITR_MASK_CLEAR);
        FM10K_WRITE_FLUSH(hw);
}
static void
fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t int_map = FM10K_INT_MAP_DISABLE;

        /* Only INT 0 available, the other 15 are reserved. */
        FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);

        /* Disable ITR 0 */
        FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
        FM10K_WRITE_FLUSH(hw);
}
static inline void
fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
{
        struct fm10k_fault fault;
        int err;
        const char *estr = "Unknown error";

        /* Process PCA fault */
        if (eicr & FM10K_EICR_PCA_FAULT) {
                err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
                if (err)
                        goto error;
                switch (fault.type) {
                case PCA_NO_FAULT:
                        estr = "PCA_NO_FAULT"; break;
                case PCA_UNMAPPED_ADDR:
                        estr = "PCA_UNMAPPED_ADDR"; break;
                case PCA_BAD_QACCESS_PF:
                        estr = "PCA_BAD_QACCESS_PF"; break;
                case PCA_BAD_QACCESS_VF:
                        estr = "PCA_BAD_QACCESS_VF"; break;
                case PCA_MALICIOUS_REQ:
                        estr = "PCA_MALICIOUS_REQ"; break;
                case PCA_POISONED_TLP:
                        estr = "PCA_POISONED_TLP"; break;
                case PCA_TLP_ABORT:
                        estr = "PCA_TLP_ABORT"; break;
                default:
                        goto error;
                }
                PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
                        estr, fault.func ? "VF" : "PF", fault.func,
                        fault.address, fault.specinfo);
        }

        /* Process THI fault */
        if (eicr & FM10K_EICR_THI_FAULT) {
                err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
                if (err)
                        goto error;
                switch (fault.type) {
                case THI_NO_FAULT:
                        estr = "THI_NO_FAULT"; break;
                case THI_MAL_DIS_Q_FAULT:
                        estr = "THI_MAL_DIS_Q_FAULT"; break;
                default:
                        goto error;
                }
                PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
                        estr, fault.func ? "VF" : "PF", fault.func,
                        fault.address, fault.specinfo);
        }

        /* Process FUM fault */
        if (eicr & FM10K_EICR_FUM_FAULT) {
                err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
                if (err)
                        goto error;
                switch (fault.type) {
                case FUM_NO_FAULT:
                        estr = "FUM_NO_FAULT"; break;
                case FUM_UNMAPPED_ADDR:
                        estr = "FUM_UNMAPPED_ADDR"; break;
                case FUM_POISONED_TLP:
                        estr = "FUM_POISONED_TLP"; break;
                case FUM_BAD_VF_QACCESS:
                        estr = "FUM_BAD_VF_QACCESS"; break;
                case FUM_ADD_DECODE_ERR:
                        estr = "FUM_ADD_DECODE_ERR"; break;
                case FUM_RO_ERROR:
                        estr = "FUM_RO_ERROR"; break;
                case FUM_QPRC_CRC_ERROR:
                        estr = "FUM_QPRC_CRC_ERROR"; break;
                case FUM_CSR_TIMEOUT:
                        estr = "FUM_CSR_TIMEOUT"; break;
                case FUM_INVALID_TYPE:
                        estr = "FUM_INVALID_TYPE"; break;
                case FUM_INVALID_LENGTH:
                        estr = "FUM_INVALID_LENGTH"; break;
                case FUM_INVALID_BE:
                        estr = "FUM_INVALID_BE"; break;
                case FUM_INVALID_ALIGN:
                        estr = "FUM_INVALID_ALIGN"; break;
                default:
                        goto error;
                }
                PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
                        estr, fault.func ? "VF" : "PF", fault.func,
                        fault.address, fault.specinfo);
        }

        return;
error:
        PMD_INIT_LOG(ERR, "Failed to handle fault event.");
}
/**
 * PF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_pf(
                        __rte_unused struct rte_intr_handle *handle,
                        void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t cause, status;

        if (hw->mac.type != fm10k_mac_pf)
                return;

        cause = FM10K_READ_REG(hw, FM10K_EICR);

        /* Handle PCI fault cases */
        if (cause & FM10K_EICR_FAULT_MASK) {
                PMD_INIT_LOG(ERR, "INT: found fault!");
                fm10k_dev_handle_fault(hw, cause);
        }

        /* Handle switch up/down */
        if (cause & FM10K_EICR_SWITCHNOTREADY)
                PMD_INIT_LOG(ERR, "INT: Switch is not ready");

        if (cause & FM10K_EICR_SWITCHREADY)
                PMD_INIT_LOG(INFO, "INT: Switch is ready");

        /* Handle mailbox message */
        fm10k_mbx_lock(hw);
        hw->mbx.ops.process(hw, &hw->mbx);
        fm10k_mbx_unlock(hw);

        /* Handle SRAM error */
        if (cause & FM10K_EICR_SRAMERROR) {
                PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");

                status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
                /* Write to clear pending bits */
                FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);

                /* Todo: print out error message after shared code updates */
        }

        /* Clear these 3 events if having any */
        cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
                 FM10K_EICR_SWITCHREADY;
        if (cause)
                FM10K_WRITE_REG(hw, FM10K_EICR, cause);

        /* Re-enable interrupt from device side */
        FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
                                FM10K_ITR_MASK_CLEAR);
        /* Re-enable interrupt from host side */
        rte_intr_enable(&(dev->pci_dev->intr_handle));
}
/**
 * VF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_vf(
			__rte_unused struct rte_intr_handle *handle,
			void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != fm10k_mac_vf)
		return;

	/* Handle mailbox message if lock is acquired */
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}

/* Mailbox message handler in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

/* Mailbox message handler in PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

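/* Install the role-specific handler table above and open the mailbox
 * connection; called once during device init.
 */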
static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err = 0;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace default message handler with new ones */
	if (hw->mac.type == fm10k_mac_pf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
	else
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
				err);
		return err;
	}

	/* Connect to SM for PF device or PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}

static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
	/* Disconnect from SM for PF device or PF for VF device */
	hw->mbx.ops.disconnect(hw, &hw->mbx);
}

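/* Per-device callbacks exported to the generic ethdev layer */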
static const struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure = fm10k_dev_configure,
	.dev_start = fm10k_dev_start,
	.dev_stop = fm10k_dev_stop,
	.dev_close = fm10k_dev_close,
	.promiscuous_enable = fm10k_dev_promiscuous_enable,
	.promiscuous_disable = fm10k_dev_promiscuous_disable,
	.allmulticast_enable = fm10k_dev_allmulticast_enable,
	.allmulticast_disable = fm10k_dev_allmulticast_disable,
	.stats_get = fm10k_stats_get,
	.stats_reset = fm10k_stats_reset,
	.link_update = fm10k_link_update,
	.dev_infos_get = fm10k_dev_infos_get,
	.vlan_filter_set = fm10k_vlan_filter_set,
	.vlan_offload_set = fm10k_vlan_offload_set,
	.mac_addr_add = fm10k_macaddr_add,
	.mac_addr_remove = fm10k_macaddr_remove,
	.rx_queue_start = fm10k_dev_rx_queue_start,
	.rx_queue_stop = fm10k_dev_rx_queue_stop,
	.tx_queue_start = fm10k_dev_tx_queue_start,
	.tx_queue_stop = fm10k_dev_tx_queue_stop,
	.rx_queue_setup = fm10k_rx_queue_setup,
	.rx_queue_release = fm10k_rx_queue_release,
	.tx_queue_setup = fm10k_tx_queue_setup,
	.tx_queue_release = fm10k_tx_queue_release,
	.reta_update = fm10k_reta_update,
	.reta_query = fm10k_reta_query,
	.rss_hash_update = fm10k_rss_hash_update,
	.rss_hash_conf_get = fm10k_rss_hash_conf_get,
};

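/* Pick the Rx burst function: prefer the vector paths when the device
 * configuration allows them, fall back to the scalar (scattered) paths
 * otherwise, and record the choice on every Rx queue.
 */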
static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
	struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
	uint16_t i, rx_using_sse;

	/* In order to allow Vector Rx there are a few configuration
	 * conditions to be met.
	 */
	if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
		else
			dev->rx_pkt_burst = fm10k_recv_pkts_vec;
	} else if (dev->data->scattered_rx)
		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;

	rx_using_sse =
		(dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
		dev->rx_pkt_burst == fm10k_recv_pkts_vec);

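	/* Propagate the chosen path to each queue so the per-queue Rx code
	 * knows whether the vector routines are in use.
	 */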
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];

		rxq->rx_using_sse = rx_using_sse;
	}
}

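/* Initialize software defaults that are not read back from hardware:
 * assumed PCIe bus parameters and the Rx vector permission flag.
 */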
static void
fm10k_params_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);

	/* Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4.
	 * Until this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

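	/* Vector Rx is permitted by default; fm10k_set_rx_function() still
	 * checks the runtime conditions before selecting the vector path.
	 */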
	info->rx_vec_allowed = true;
}

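/* Per-device init hook invoked by the ethdev layer via struct eth_driver:
 * wires up ops and burst functions, initializes the shared code and the
 * hardware, sets up MAC addresses, interrupts and the mailbox, and brings
 * the logical port up.
 */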
static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag;
	struct fm10k_macvlan_filter_info *macvlan;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;

	/* only initialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

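	/* Clear the software MAC/VLAN filter bookkeeping */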
	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	memset(macvlan, 0, sizeof(*macvlan));

	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = dev->pci_dev->id.device_id;
	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* Initialize parameters */
	fm10k_params_init(dev);

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k",
			ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

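	/* Read the permanent MAC address from hardware; if it is missing or
	 * invalid, fall back to a locally generated random address below.
	 */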
	diag = fm10k_read_mac_addr(hw);

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	if (diag != FM10K_SUCCESS ||
		!is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
		/* Generate a random addr */
		eth_random_addr(hw->mac.addr);
		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		ether_addr_copy((const struct ether_addr *)hw->mac.addr,
				&dev->data->mac_addrs[0]);
	}

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return diag;
	}

	/* PF/VF has different interrupt handling mechanism */
	if (hw->mac.type == fm10k_mac_pf) {
		/* register callback func to eal lib */
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_pf, (void *)dev);

		/* enable MISC interrupt */
		fm10k_dev_enable_intr_pf(dev);
	} else { /* VF */
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);

		fm10k_dev_enable_intr_vf(dev);
	}

	/* Enable uio intr after callback registered */
	rte_intr_enable(&(dev->pci_dev->intr_handle));

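	/* Ask the shared code to update its interrupt moderator settings */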
	hw->mac.ops.update_int_moderator(hw);

	/* Make sure Switch Manager is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		int switch_ready = 0;
		int i;

		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			fm10k_mbx_lock(hw);
			hw->mac.ops.get_host_state(hw, &switch_ready);
			fm10k_mbx_unlock(hw);
			if (switch_ready)
				break;
			/* Delay some time to acquire async LPORT_MAP info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (switch_ready == 0) {
			PMD_INIT_LOG(ERR, "switch is not ready");
			return -1;
		}
	}

	/*
	 * The calls below operate on the mailbox, so take the lock to avoid
	 * racing with the interrupt handler. Pushing messages into the mailbox
	 * FIFO raises an interrupt to the PF/SM; its handler replies with an
	 * interrupt back to our side, which in turn touches the FIFO in our
	 * mailbox.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);

	/* Set unicast mode by default. App can change to other mode in other
	 * API func.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
					FM10K_XCAST_MODE_NONE);

	fm10k_mbx_unlock(hw);

	/* Add default mac address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);

	return 0;
}

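/* Per-device teardown hook: undoes eth_fm10k_dev_init() by closing the
 * device, unhooking interrupts and releasing MAC address memory.
 */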
static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	fm10k_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* disable uio/vfio intr */
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* PF/VF has different interrupt handling mechanism */
	if (hw->mac.type == fm10k_mac_pf) {
		/* disable interrupt */
		fm10k_dev_disable_intr_pf(dev);

		/* unregister callback func from eal lib */
		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_pf, (void *)dev);
	} else {
		/* disable interrupt */
		fm10k_dev_disable_intr_vf(dev);

		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);
	}

	/* free mac memory */
	if (dev->data->mac_addrs) {
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
	}

	memset(hw, 0, sizeof(*hw));

	return 0;
}

/*
 * The set of PCI devices this driver supports. This driver will enable both PF
 * and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};

static struct eth_driver rte_pmd_fm10k = {
	.pci_drv = {
		.name = "rte_pmd_fm10k",
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.eth_dev_uninit = eth_fm10k_dev_uninit,
	.dev_private_size = sizeof(struct fm10k_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_pmd_fm10k);
	return 0;
}

static struct rte_driver rte_fm10k_driver = {
	.type = PMD_PDEV,
	.init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);