4 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_memzone.h>
37 #include <rte_string_fns.h>
39 #include <rte_spinlock.h>
42 #include "base/fm10k_api.h"
44 #define FM10K_RX_BUFF_ALIGN 512
45 /* Default delay to acquire mailbox lock */
46 #define FM10K_MBXLOCK_DELAY_US 20
47 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
49 /* Number of chars per uint32 type */
50 #define CHARS_PER_UINT32 (sizeof(uint32_t))
51 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
53 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
54 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
55 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
56 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
57 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
58 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
/* Initialize the spinlock that serializes access to the device mailbox.
 * The lock lives in the device private data reached through hw->back.
 * NOTE(review): extraction appears to have dropped the return type and
 * braces of this function. */
61 fm10k_mbx_initlock(struct fm10k_hw *hw)
63 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
/* Acquire the mailbox spinlock. Spins with a fixed delay
 * (FM10K_MBXLOCK_DELAY_US) between trylock attempts rather than
 * busy-spinning at full speed. */
67 fm10k_mbx_lock(struct fm10k_hw *hw)
69 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
70 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
/* Release the mailbox spinlock acquired by fm10k_mbx_lock(). */
74 fm10k_mbx_unlock(struct fm10k_hw *hw)
76 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
80 * reset queue to initial state, allocate software buffers used when starting
83 * return -ENOMEM if buffers cannot be allocated
84 * return -EINVAL if buffers do not satisfy alignment condition
87 rx_queue_reset(struct fm10k_rx_queue *q)
91 PMD_INIT_FUNC_TRACE();
/* Bulk-allocate one mbuf per descriptor into the software ring.
 * NOTE(review): the error check on diag (-ENOMEM path) seems to have
 * been dropped by extraction. */
93 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
97 for (i = 0; i < q->nb_desc; ++i) {
98 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
/* HW requires specific buffer alignment; give all mbufs back and
 * fail (-EINVAL) if any buffer does not satisfy it. */
99 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
100 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
/* Program both packet and header address with the same DMA address
 * (no header split in use). */
104 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
105 q->hw_ring[i].q.pkt_addr = dma_addr;
106 q->hw_ring[i].q.hdr_addr = dma_addr;
/* Arm the allocation trigger and hand all descriptors to HW by
 * writing the tail pointer to the last slot. */
111 q->next_trigger = q->alloc_thresh - 1;
112 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
117 * clean queue, descriptor rings, free software buffers used when stopping
121 rx_queue_clean(struct fm10k_rx_queue *q)
/* All-zero descriptor used to wipe the HW ring. */
123 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
125 PMD_INIT_FUNC_TRACE();
127 /* zero descriptor rings */
128 for (i = 0; i < q->nb_desc; ++i)
129 q->hw_ring[i] = zero;
131 /* free software buffers */
132 for (i = 0; i < q->nb_desc; ++i) {
/* NOTE(review): a NULL check on sw_ring[i] was likely here in the
 * dropped line before free. */
134 rte_pktmbuf_free_seg(q->sw_ring[i]);
135 q->sw_ring[i] = NULL;
141 * free all queue memory used when releasing the queue (i.e. configure)
144 rx_queue_free(struct fm10k_rx_queue *q)
146 PMD_INIT_FUNC_TRACE();
148 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
/* Frees the software ring; the queue structure itself and the HW ring
 * memzone are handled elsewhere (memzones are looked up for reuse in
 * allocate_hw_ring()). */
151 rte_free(q->sw_ring);
160 * disable RX queue, wait unitl HW finished necessary flush operation
163 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
167 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
168 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
169 reg & ~FM10K_RXQCTL_ENABLE);
171 /* Wait 100us at most */
172 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
174 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
175 if (!(reg & FM10K_RXQCTL_ENABLE))
179 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
186 * reset queue to initial state, allocate software buffers used when starting
190 tx_queue_reset(struct fm10k_tx_queue *q)
192 PMD_INIT_FUNC_TRACE();
/* One descriptor is always kept unused so head==tail means "empty". */
196 q->nb_free = q->nb_desc - 1;
197 q->free_trigger = q->nb_free - q->free_thresh;
/* Reset the RS-bit tracker FIFO; it holds one slot per rs_thresh
 * descriptors. */
198 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
199 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
203 * clean queue, descriptor rings, free software buffers used when stopping
207 tx_queue_clean(struct fm10k_tx_queue *q)
/* All-zero descriptor used to wipe the HW ring. */
209 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
211 PMD_INIT_FUNC_TRACE();
213 /* zero descriptor rings */
214 for (i = 0; i < q->nb_desc; ++i)
215 q->hw_ring[i] = zero;
217 /* free software buffers */
218 for (i = 0; i < q->nb_desc; ++i) {
/* NOTE(review): a NULL check on sw_ring[i] was likely in the dropped
 * line before free. */
220 rte_pktmbuf_free_seg(q->sw_ring[i]);
221 q->sw_ring[i] = NULL;
227 * free all queue memory used when releasing the queue (i.e. configure)
230 tx_queue_free(struct fm10k_tx_queue *q)
232 PMD_INIT_FUNC_TRACE();
234 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
/* Release the RS-bit tracker list if it was allocated, then the
 * software ring. NULLing the pointer guards against double free. */
236 if (q->rs_tracker.list) {
237 rte_free(q->rs_tracker.list);
238 q->rs_tracker.list = NULL;
241 rte_free(q->sw_ring);
250 * disable TX queue, wait unitl HW finished necessary flush operation
253 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
257 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
258 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
259 reg & ~FM10K_TXDCTL_ENABLE);
261 /* Wait 100us at most */
262 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
264 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
265 if (!(reg & FM10K_TXDCTL_ENABLE))
269 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
/* ethdev .dev_configure callback. Only validates the CRC-strip setting:
 * this HW always strips the CRC, so a request not to strip is warned
 * about and otherwise ignored. */
276 fm10k_dev_configure(struct rte_eth_dev *dev)
278 PMD_INIT_FUNC_TRACE();
280 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
281 PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
/* Configure multi-queue RX (RSS): program the hash key, the redirection
 * table (RETA) and the MRQC hash-type register. A no-op when only one RX
 * queue exists, the mq_mode is not RSS, or no hash functions are
 * requested. */
287 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
289 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
290 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
291 uint32_t mrqc, *key, i, reta, j;
/* Well-known default Intel RSS key used when the application supplies
 * none. NOTE(review): only 5 of 5 rows visible; last row appears cut
 * by extraction. */
294 #define RSS_KEY_SIZE 40
295 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
296 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
297 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
298 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
299 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
300 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
/* RSS disabled/not applicable: nothing to program. */
303 if (dev->data->nb_rx_queues == 1 ||
304 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
305 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
308 /* random key is rss_intel_key (default) or user provided (rss_key) */
309 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
310 key = (uint32_t *)rss_intel_key;
312 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
314 /* Now fill our hash function seeds, 4 bytes at a time */
315 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
316 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
319 * Fill in redirection table
320 * The byte-swap is needed because NIC registers are in
321 * little-endian order.
/* Queue indices are assigned round-robin over nb_rx_queues; four
 * 8-bit entries are packed per 32-bit RETA register. */
324 for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
325 if (j == dev->data->nb_rx_queues)
327 reta = (reta << CHAR_BIT) | j;
329 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
334 * Generate RSS hash based on packet types, TCP/UDP
335 * port numbers and/or IPv4/v6 src and dst addresses
337 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
339 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
340 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
341 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
342 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
343 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
344 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
345 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
346 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
347 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
/* No supported hash type selected: log an error (dropped lines likely
 * returned here before the final MRQC write). */
350 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
355 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
/* Program all configured TX queues into HW: disable TX interrupts, then
 * for each queue disable it, and write the descriptor ring base address
 * (split into low/high 32 bits) and ring size. */
359 fm10k_dev_tx_init(struct rte_eth_dev *dev)
361 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
363 struct fm10k_tx_queue *txq;
367 /* Disable TXINT to avoid possible interrupt */
368 for (i = 0; i < hw->mac.max_queues; i++)
369 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
370 3 << FM10K_TXINT_TIMER_SHIFT);
373 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
374 txq = dev->data->tx_queues[i];
375 base_addr = txq->hw_ring_phys_addr;
376 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
378 /* disable queue to avoid issues while updating state */
379 ret = tx_queue_disable(hw, i);
381 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
385 /* set location and size for descriptor ring */
386 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
387 base_addr & UINT64_LOWER_32BITS_MASK);
388 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
389 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
390 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
/* Program all configured RX queues into HW: disable RX interrupts, then
 * per queue write the ring base/size, buffer size, scatter settings and
 * RXDCTL; finally configure RSS if applicable. */
396 fm10k_dev_rx_init(struct rte_eth_dev *dev)
398 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
400 struct fm10k_rx_queue *rxq;
403 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
406 /* Disable RXINT to avoid possible interrupt */
407 for (i = 0; i < hw->mac.max_queues; i++)
408 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
409 3 << FM10K_RXINT_TIMER_SHIFT);
411 /* Setup RX queues */
412 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
413 rxq = dev->data->rx_queues[i];
414 base_addr = rxq->hw_ring_phys_addr;
415 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
417 /* disable queue to avoid issues while updating state */
418 ret = rx_queue_disable(hw, i);
420 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
424 /* Setup the Base and Length of the Rx Descriptor Ring */
425 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
426 base_addr & UINT64_LOWER_32BITS_MASK);
427 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
428 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
429 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
431 /* Configure the Rx buffer size for one buff without split */
432 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
433 RTE_PKTMBUF_HEADROOM);
434 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
435 buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
437 /* It adds dual VLAN length for supporting dual VLAN */
/* If the max frame (plus two VLAN tags) exceeds one buffer, switch to
 * the scattered-RX burst function. */
438 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
439 2 * FM10K_VLAN_TAG_SIZE) > buf_size){
440 dev->data->scattered_rx = 1;
441 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
444 /* Enable drop on empty, it's RO for VF */
445 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
446 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
448 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
449 FM10K_WRITE_FLUSH(hw);
/* Scattered RX may also be requested explicitly by configuration. */
452 if (dev->data->dev_conf.rxmode.enable_scatter) {
453 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
454 dev->data->scattered_rx = 1;
457 /* Configure RSS if applicable */
458 fm10k_dev_mq_rx_configure(dev);
/* ethdev .rx_queue_start callback: reset the queue's software state and
 * buffers, program head/tail pointers, then set the queue enable bit
 * (plus the PF ownership flag on PF devices). */
463 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
465 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
468 struct fm10k_rx_queue *rxq;
470 PMD_INIT_FUNC_TRACE();
472 if (rx_queue_id < dev->data->nb_rx_queues) {
473 rxq = dev->data->rx_queues[rx_queue_id];
474 err = rx_queue_reset(rxq);
475 if (err == -ENOMEM) {
476 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
478 } else if (err == -EINVAL) {
479 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
484 /* Setup the HW Rx Head and Tail Descriptor Pointers
485 * Note: this must be done AFTER the queue is enabled on real
486 * hardware, but BEFORE the queue is enabled when using the
487 * emulation platform. Do it in both places for now and remove
488 * this comment and the following two register writes when the
489 * emulation platform is no longer being used.
491 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
492 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
494 /* Set PF ownership flag for PF devices */
495 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
496 if (hw->mac.type == fm10k_mac_pf)
497 reg |= FM10K_RXQCTL_PF;
498 reg |= FM10K_RXQCTL_ENABLE;
499 /* enable RX queue */
500 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
501 FM10K_WRITE_FLUSH(hw);
503 /* Setup the HW Rx Head and Tail Descriptor Pointers
504 * Note: this must be done AFTER the queue is enabled
506 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
507 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
/* ethdev .rx_queue_stop callback: disable the HW queue, then free mbufs
 * and wipe the descriptor ring. Out-of-range ids are ignored. */
514 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
516 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
518 PMD_INIT_FUNC_TRACE();
520 if (rx_queue_id < dev->data->nb_rx_queues) {
521 /* Disable RX queue */
522 rx_queue_disable(hw, rx_queue_id);
524 /* Free mbuf and clean HW ring */
525 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
/* ethdev .tx_queue_start callback: reset the queue's software state,
 * zero the head/tail pointers, and enable the queue with the minimum
 * write-back delay. */
532 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
534 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
535 /** @todo - this should be defined in the shared code */
536 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
537 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
540 PMD_INIT_FUNC_TRACE();
542 if (tx_queue_id < dev->data->nb_tx_queues) {
543 tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
545 /* reset head and tail pointers */
546 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
547 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
549 /* enable TX queue */
550 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
551 FM10K_TXDCTL_ENABLE | txdctl);
552 FM10K_WRITE_FLUSH(hw);
/* ethdev .tx_queue_stop callback: disable the HW queue and release its
 * software buffers. Out-of-range ids are ignored. */
560 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
562 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
564 PMD_INIT_FUNC_TRACE();
566 if (tx_queue_id < dev->data->nb_tx_queues) {
567 tx_queue_disable(hw, tx_queue_id);
568 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
/* Return non-zero once the switch manager has assigned this port a valid
 * glort (global resource tag) range, i.e. dglort_map no longer carries
 * the FM10K_DGLORTMAP_NONE marker. */
574 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
576 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
577 != FM10K_DGLORTMAP_NONE);
/* ethdev .promiscuous_enable callback: request PROMISC xcast mode from
 * the switch manager over the mailbox. No-op until a valid glort range
 * has been acquired. */
581 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
583 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
586 PMD_INIT_FUNC_TRACE();
588 /* Return if it didn't acquire valid glort range */
589 if (!fm10k_glort_valid(hw))
/* NOTE(review): the matching fm10k_mbx_lock(hw) call appears dropped
 * by extraction — the unlock below implies it preceded this call. */
593 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
594 FM10K_XCAST_MODE_PROMISC);
595 fm10k_mbx_unlock(hw);
597 if (status != FM10K_SUCCESS)
598 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
/* ethdev .promiscuous_disable callback: fall back to ALLMULTI if that
 * mode is still requested, otherwise NONE, and push the mode to the
 * switch manager over the mailbox. */
602 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
604 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
608 PMD_INIT_FUNC_TRACE();
610 /* Return if it didn't acquire valid glort range */
611 if (!fm10k_glort_valid(hw))
614 if (dev->data->all_multicast == 1)
615 mode = FM10K_XCAST_MODE_ALLMULTI;
617 mode = FM10K_XCAST_MODE_NONE;
/* NOTE(review): fm10k_mbx_lock(hw) likely preceded this call in a
 * dropped line (unlock below). */
620 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
622 fm10k_mbx_unlock(hw);
624 if (status != FM10K_SUCCESS)
625 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
/* ethdev .allmulticast_enable callback: request ALLMULTI xcast mode,
 * unless promiscuous mode is already active (which supersedes it — the
 * HW selects a single xcast mode). */
629 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
631 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
634 PMD_INIT_FUNC_TRACE();
636 /* Return if it didn't acquire valid glort range */
637 if (!fm10k_glort_valid(hw))
640 /* If promiscuous mode is enabled, it doesn't make sense to enable
641 * allmulticast and disable promiscuous since fm10k only can select
644 if (dev->data->promiscuous) {
645 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
646 "needn't enable allmulticast");
651 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
652 FM10K_XCAST_MODE_ALLMULTI);
653 fm10k_mbx_unlock(hw);
655 if (status != FM10K_SUCCESS)
656 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
/* ethdev .allmulticast_disable callback: switch back to unicast (NONE)
 * xcast mode. Refuses (with an error log) while promiscuous mode is
 * active, since that mode takes precedence. */
660 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
662 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
665 PMD_INIT_FUNC_TRACE();
667 /* Return if it didn't acquire valid glort range */
668 if (!fm10k_glort_valid(hw))
671 if (dev->data->promiscuous) {
672 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
673 "since promisc mode is enabled");
678 /* Change mode to unicast mode */
679 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
680 FM10K_XCAST_MODE_NONE);
681 fm10k_mbx_unlock(hw);
683 if (status != FM10K_SUCCESS)
684 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
687 /* fls = find last set bit = 32 minus the number of leading zeros */
689 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
691 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
/* ethdev .dev_start callback: stop/init/start the HW, program TX and RX
 * rings, set up DGLORT mapping on PF devices, and start every queue not
 * marked deferred. On a queue-start failure, queues started so far are
 * cleaned up before returning. */
693 fm10k_dev_start(struct rte_eth_dev *dev)
695 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
698 PMD_INIT_FUNC_TRACE();
700 /* stop, init, then start the hw */
701 diag = fm10k_stop_hw(hw);
702 if (diag != FM10K_SUCCESS) {
703 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
707 diag = fm10k_init_hw(hw);
708 if (diag != FM10K_SUCCESS) {
709 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
713 diag = fm10k_start_hw(hw);
714 if (diag != FM10K_SUCCESS) {
715 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
719 diag = fm10k_dev_tx_init(dev);
721 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
725 diag = fm10k_dev_rx_init(dev);
727 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
731 if (hw->mac.type == fm10k_mac_pf) {
732 /* Establish only VSI 0 as valid */
733 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
735 /* Configure RSS bits used in RETA table */
736 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
737 fls(dev->data->nb_rx_queues - 1) <<
738 FM10K_DGLORTDEC_RSSLENGTH_SHIFT);
740 /* Invalidate all other GLORT entries */
741 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
742 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
743 FM10K_DGLORTMAP_NONE);
/* Start RX queues unless marked deferred; roll back the ones already
 * started if any start fails. */
746 for (i = 0; i < dev->data->nb_rx_queues; i++) {
747 struct fm10k_rx_queue *rxq;
748 rxq = dev->data->rx_queues[i];
750 if (rxq->rx_deferred_start)
752 diag = fm10k_dev_rx_queue_start(dev, i);
755 for (j = 0; j < i; ++j)
756 rx_queue_clean(dev->data->rx_queues[j]);
/* Start TX queues unless marked deferred; on failure clean all RX
 * queues (TX rollback lines appear dropped by extraction). */
761 for (i = 0; i < dev->data->nb_tx_queues; i++) {
762 struct fm10k_tx_queue *txq;
763 txq = dev->data->tx_queues[i];
765 if (txq->tx_deferred_start)
767 diag = fm10k_dev_tx_queue_start(dev, i);
770 for (j = 0; j < dev->data->nb_rx_queues; ++j)
771 rx_queue_clean(dev->data->rx_queues[j]);
/* ethdev .dev_stop callback: stop every TX queue, then every RX queue. */
780 fm10k_dev_stop(struct rte_eth_dev *dev)
784 PMD_INIT_FUNC_TRACE();
786 for (i = 0; i < dev->data->nb_tx_queues; i++)
787 fm10k_dev_tx_queue_stop(dev, i);
789 for (i = 0; i < dev->data->nb_rx_queues; i++)
790 fm10k_dev_rx_queue_stop(dev, i);
/* ethdev .dev_close callback: shut down the mailbox service first so no
 * further messages are exchanged with the switch manager. */
794 fm10k_dev_close(struct rte_eth_dev *dev)
796 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
798 PMD_INIT_FUNC_TRACE();
800 /* Stop mailbox service first */
801 fm10k_close_mbx_service(hw);
/* ethdev .link_update callback: the host interface is always up; speed
 * is left at 0 because the ~50G PCIe Gen3 x8 link has no matching
 * Ethernet speed value. */
807 fm10k_link_update(struct rte_eth_dev *dev,
808 __rte_unused int wait_to_complete)
810 PMD_INIT_FUNC_TRACE();
812 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
813 * x8 PCIe interface. For now, we leave the speed undefined since there
814 * is no 50Gbps Ethernet. */
815 dev->data->dev_link.link_speed = 0;
816 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
817 dev->data->dev_link.link_status = 1;
/* ethdev .stats_get callback: refresh the HW stats snapshot, copy the
 * per-queue counters (bounded by RTE_ETHDEV_QUEUE_STAT_CNTRS), and sum
 * them into the port-level totals. */
823 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
825 uint64_t ipackets, opackets, ibytes, obytes;
826 struct fm10k_hw *hw =
827 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
828 struct fm10k_hw_stats *hw_stats =
829 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
832 PMD_INIT_FUNC_TRACE();
834 fm10k_update_hw_stats(hw, hw_stats);
836 ipackets = opackets = ibytes = obytes = 0;
837 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
838 (i < hw->mac.max_queues); ++i) {
839 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
840 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
841 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
842 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
843 ipackets += stats->q_ipackets[i];
844 opackets += stats->q_opackets[i];
845 ibytes += stats->q_ibytes[i];
846 obytes += stats->q_obytes[i];
848 stats->ipackets = ipackets;
849 stats->opackets = opackets;
850 stats->ibytes = ibytes;
851 stats->obytes = obytes;
/* ethdev .stats_reset callback: zero the software stats snapshot and
 * rebind the HW counters so subsequent deltas start from zero. */
855 fm10k_stats_reset(struct rte_eth_dev *dev)
857 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
858 struct fm10k_hw_stats *hw_stats =
859 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
861 PMD_INIT_FUNC_TRACE();
863 memset(hw_stats, 0, sizeof(*hw_stats));
864 fm10k_rebind_hw_stats(hw, hw_stats);
/* ethdev .dev_infos_get callback: report device capabilities (queue
 * counts, offload flags, RETA size) and the default RX/TX queue
 * configuration thresholds. */
868 fm10k_dev_infos_get(struct rte_eth_dev *dev,
869 struct rte_eth_dev_info *dev_info)
871 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
873 PMD_INIT_FUNC_TRACE();
875 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
876 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
877 dev_info->max_rx_queues = hw->mac.max_queues;
878 dev_info->max_tx_queues = hw->mac.max_queues;
879 dev_info->max_mac_addrs = 1;
880 dev_info->max_hash_mac_addrs = 0;
881 dev_info->max_vfs = dev->pci_dev->max_vfs;
882 dev_info->max_vmdq_pools = ETH_64_POOLS;
883 dev_info->rx_offload_capa =
884 DEV_RX_OFFLOAD_IPV4_CKSUM |
885 DEV_RX_OFFLOAD_UDP_CKSUM |
886 DEV_RX_OFFLOAD_TCP_CKSUM;
/* No TX offloads advertised at this point. */
887 dev_info->tx_offload_capa = 0;
888 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
890 dev_info->default_rxconf = (struct rte_eth_rxconf) {
892 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
893 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
894 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
896 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
900 dev_info->default_txconf = (struct rte_eth_txconf) {
902 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
903 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
904 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
906 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
907 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
908 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
909 ETH_TXQ_FLAGS_NOOFFLOADS,
/* ethdev .vlan_filter_set callback: add/remove a VLAN filter via the
 * shared code. PF only for now (see the @todo). */
915 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
917 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
919 PMD_INIT_FUNC_TRACE();
921 /* @todo - add support for the VF */
922 if (hw->mac.type != fm10k_mac_pf)
925 return fm10k_update_vlan(hw, vlan_id, 0, on);
/* Validate a requested descriptor count: it must lie in [min, max] and
 * be a multiple of mult. Non-zero result indicates an invalid request
 * (return lines appear dropped by extraction). */
929 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
931 if ((request < min) || (request > max) || ((request % mult) != 0))
938 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
939 * the physical address is required. If the memzone is already created, then
940 * this function returns a pointer to the existing memzone.
942 static inline const struct rte_memzone *
943 allocate_hw_ring(const char *driver_name, const char *ring_name,
944 uint8_t port_id, uint16_t queue_id, int socket_id,
945 uint32_t size, uint32_t align)
947 char name[RTE_MEMZONE_NAMESIZE];
948 const struct rte_memzone *mz;
/* Memzone name encodes driver, ring kind, port, queue and socket so the
 * same queue reconfigured later finds its existing zone. */
950 snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
951 driver_name, ring_name, port_id, queue_id, socket_id);
953 /* return the memzone if it already exists */
954 mz = rte_memzone_lookup(name);
/* Xen dom0 needs a bounded reservation; otherwise a plain aligned one. */
958 #ifdef RTE_LIBRTE_XEN_DOM0
959 return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
962 return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
/* Validate a threshold request: it must lie in [min, max] and evenly
 * divide div. Non-zero result indicates an invalid request (return
 * lines appear dropped by extraction). */
967 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
969 if ((request < min) || (request > max) || ((div % request) != 0))
/* Apply an rte_eth_rxconf to an RX queue: pick the free threshold
 * (default if the caller passed 0), validate it against the queue's
 * min/max/divisor constraints, and store drop/deferred-start flags. */
976 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
978 uint16_t rx_free_thresh;
980 if (conf->rx_free_thresh == 0)
981 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
983 rx_free_thresh = conf->rx_free_thresh;
985 /* make sure the requested threshold satisfies the constraints */
986 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
987 FM10K_RX_FREE_THRESH_MAX(q),
988 FM10K_RX_FREE_THRESH_DIV(q),
990 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
991 "less than or equal to %u, "
992 "greater than or equal to %u, "
993 "and a divisor of %u",
994 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
995 FM10K_RX_FREE_THRESH_MIN(q),
996 FM10K_RX_FREE_THRESH_DIV(q));
/* Validated threshold doubles as the mbuf re-allocation trigger. */
1000 q->alloc_thresh = rx_free_thresh;
1001 q->drop_en = conf->rx_drop_en;
1002 q->rx_deferred_start = conf->rx_deferred_start;
1008 * Hardware requires specific alignment for Rx packet buffers. At
1009 * least one of the following two conditions must be satisfied.
1010 * 1. Address is 512B aligned
1011 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1013 * As such, the driver may need to adjust the DMA address within the
1014 * buffer by up to 512B.
1016 * return 1 if the element size is valid, otherwise return 0.
1019 mempool_element_size_valid(struct rte_mempool *mp)
1023 /* elt_size includes mbuf header and headroom */
1024 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1025 RTE_PKTMBUF_HEADROOM;
1027 /* account for up to 512B of alignment */
1028 min_size -= FM10K_RX_BUFF_ALIGN;
1030 /* sanity check for overflow */
/* min_size is unsigned: the subtractions above wrap on underflow, so a
 * value larger than elt_size signals an element that is too small. */
1031 if (min_size > mp->elt_size)
/* ethdev .rx_queue_setup callback: validate the mempool element size and
 * descriptor count, free any previous queue at this index, then allocate
 * the queue structure, software ring and HW descriptor-ring memzone. */
1039 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1040 uint16_t nb_desc, unsigned int socket_id,
1041 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1043 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1044 struct fm10k_rx_queue *q;
1045 const struct rte_memzone *mz;
1047 PMD_INIT_FUNC_TRACE();
1049 /* make sure the mempool element size can account for alignment. */
1050 if (!mempool_element_size_valid(mp)) {
1051 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1055 /* make sure a valid number of descriptors have been requested */
1056 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1057 FM10K_MULT_RX_DESC, nb_desc)) {
1058 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1059 "less than or equal to %"PRIu32", "
1060 "greater than or equal to %u, "
1061 "and a multiple of %u",
1062 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1063 FM10K_MULT_RX_DESC);
1068 * if this queue existed already, free the associated memory. The
1069 * queue cannot be reused in case we need to allocate memory on
1070 * different socket than was previously used.
1072 if (dev->data->rx_queues[queue_id] != NULL) {
1073 rx_queue_free(dev->data->rx_queues[queue_id]);
1074 dev->data->rx_queues[queue_id] = NULL;
1077 /* allocate memory for the queue structure */
1078 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1081 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
/* Fill in queue identity and map the tail register for this queue. */
1087 q->nb_desc = nb_desc;
1088 q->port_id = dev->data->port_id;
1089 q->queue_id = queue_id;
1090 q->tail_ptr = (volatile uint32_t *)
1091 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1092 if (handle_rxconf(q, conf))
1095 /* allocate memory for the software ring */
1096 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1097 nb_desc * sizeof(struct rte_mbuf *),
1098 RTE_CACHE_LINE_SIZE, socket_id);
1099 if (q->sw_ring == NULL) {
1100 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1106 * allocate memory for the hardware descriptor ring. A memzone large
1107 * enough to hold the maximum ring size is requested to allow for
1108 * resizing in later calls to the queue setup function.
1110 mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
1111 dev->data->port_id, queue_id, socket_id,
1112 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
1114 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1115 rte_free(q->sw_ring);
1119 q->hw_ring = mz->addr;
1120 q->hw_ring_phys_addr = mz->phys_addr;
1122 dev->data->rx_queues[queue_id] = q;
/* ethdev .rx_queue_release callback: thin wrapper over rx_queue_free(). */
1127 fm10k_rx_queue_release(void *queue)
1129 PMD_INIT_FUNC_TRACE();
1131 rx_queue_free(queue);
/* Apply an rte_eth_txconf to a TX queue: resolve and validate the free
 * threshold first (the RS-threshold constraint macros depend on it),
 * then the RS threshold, and store the deferred-start flag. */
1135 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1137 uint16_t tx_free_thresh;
1138 uint16_t tx_rs_thresh;
1140 /* constraint MACROs require that tx_free_thresh is configured
1141 * before tx_rs_thresh */
1142 if (conf->tx_free_thresh == 0)
1143 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1145 tx_free_thresh = conf->tx_free_thresh;
1147 /* make sure the requested threshold satisfies the constraints */
1148 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1149 FM10K_TX_FREE_THRESH_MAX(q),
1150 FM10K_TX_FREE_THRESH_DIV(q),
1152 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1153 "less than or equal to %u, "
1154 "greater than or equal to %u, "
1155 "and a divisor of %u",
1156 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1157 FM10K_TX_FREE_THRESH_MIN(q),
1158 FM10K_TX_FREE_THRESH_DIV(q));
1162 q->free_thresh = tx_free_thresh;
1164 if (conf->tx_rs_thresh == 0)
1165 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1167 tx_rs_thresh = conf->tx_rs_thresh;
1169 q->tx_deferred_start = conf->tx_deferred_start;
1171 /* make sure the requested threshold satisfies the constraints */
1172 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1173 FM10K_TX_RS_THRESH_MAX(q),
1174 FM10K_TX_RS_THRESH_DIV(q),
1176 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1177 "less than or equal to %u, "
1178 "greater than or equal to %u, "
1179 "and a divisor of %u",
1180 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1181 FM10K_TX_RS_THRESH_MIN(q),
1182 FM10K_TX_RS_THRESH_DIV(q));
1186 q->rs_thresh = tx_rs_thresh;
/* ethdev .tx_queue_setup callback: validate the descriptor count, free
 * any previous queue at this index, then allocate the queue structure,
 * software ring, HW descriptor-ring memzone and RS-bit tracker list. */
1192 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1193 uint16_t nb_desc, unsigned int socket_id,
1194 const struct rte_eth_txconf *conf)
1196 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1197 struct fm10k_tx_queue *q;
1198 const struct rte_memzone *mz;
1200 PMD_INIT_FUNC_TRACE();
1202 /* make sure a valid number of descriptors have been requested */
1203 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1204 FM10K_MULT_TX_DESC, nb_desc)) {
1205 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1206 "less than or equal to %"PRIu32", "
1207 "greater than or equal to %u, "
1208 "and a multiple of %u",
1209 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1210 FM10K_MULT_TX_DESC);
1215 * if this queue existed already, free the associated memory. The
1216 * queue cannot be reused in case we need to allocate memory on
1217 * different socket than was previously used.
1219 if (dev->data->tx_queues[queue_id] != NULL) {
1220 tx_queue_free(dev->data->tx_queues[queue_id]);
1221 dev->data->tx_queues[queue_id] = NULL;
1224 /* allocate memory for the queue structure */
1225 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1228 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
/* Fill in queue identity and map the tail register for this queue. */
1233 q->nb_desc = nb_desc;
1234 q->port_id = dev->data->port_id;
1235 q->queue_id = queue_id;
1236 q->tail_ptr = (volatile uint32_t *)
1237 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1238 if (handle_txconf(q, conf))
1241 /* allocate memory for the software ring */
1242 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1243 nb_desc * sizeof(struct rte_mbuf *),
1244 RTE_CACHE_LINE_SIZE, socket_id);
1245 if (q->sw_ring == NULL) {
1246 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1252 * allocate memory for the hardware descriptor ring. A memzone large
1253 * enough to hold the maximum ring size is requested to allow for
1254 * resizing in later calls to the queue setup function.
1256 mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1257 dev->data->port_id, queue_id, socket_id,
1258 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
1260 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1261 rte_free(q->sw_ring);
1265 q->hw_ring = mz->addr;
1266 q->hw_ring_phys_addr = mz->phys_addr;
1269 * allocate memory for the RS bit tracker. Enough slots to hold the
1270 * descriptor index for each RS bit needing to be set are required.
1272 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1273 ((nb_desc + 1) / q->rs_thresh) *
1275 RTE_CACHE_LINE_SIZE, socket_id);
1276 if (q->rs_tracker.list == NULL) {
1277 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1278 rte_free(q->sw_ring);
1283 dev->data->tx_queues[queue_id] = q;
/*
 * eth_dev_ops tx_queue_release callback: frees all resources held by the
 * given Tx queue via the common tx_queue_free() helper.
 * (Some source lines between the ones shown are elided in this excerpt.)
 */
1288 fm10k_tx_queue_release(void *queue)
1290 PMD_INIT_FUNC_TRACE();
1292 tx_queue_free(queue);
/*
 * eth_dev_ops reta_update callback: program the RSS redirection table.
 * The hardware RETA holds FM10K_MAX_RSS_INDICES (128) one-byte entries,
 * packed four per 32-bit FM10K_RETA register (hence the i >> 2 register
 * index and the CHARS_PER_UINT32 stride).
 * NOTE(review): error-return statements between the visible lines are
 * elided in this excerpt.
 */
1296 fm10k_reta_update(struct rte_eth_dev *dev,
1297 struct rte_eth_rss_reta_entry64 *reta_conf,
1300 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1301 uint16_t i, j, idx, shift;
1305 PMD_INIT_FUNC_TRACE();
/* Reject tables larger than the 128 entries the hardware supports. */
1307 if (reta_size > FM10K_MAX_RSS_INDICES) {
1308 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1309 "(%d) doesn't match the number hardware can supported "
1310 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1315 * Update Redirection Table RETA[n], n=0..31. The redirection table has
1316 * 128-entries in 32 registers
1318 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
/* Locate the 64-entry reta_conf group and bit offset for entry i. */
1319 idx = i / RTE_RETA_GROUP_SIZE;
1320 shift = i % RTE_RETA_GROUP_SIZE;
/* Low 4 mask bits select which of the register's 4 bytes to update. */
1321 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1322 BIT_MASK_PER_UINT32);
/*
 * Partial update: read the current register so the unselected
 * bytes are preserved by the read-modify-write below.
 */
1327 if (mask != BIT_MASK_PER_UINT32)
1328 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1330 for (j = 0; j < CHARS_PER_UINT32; j++) {
1331 if (mask & (0x1 << j)) {
/* Clear byte j, then splice in the requested queue index. */
1333 reta &= ~(UINT8_MAX << CHAR_BIT * j);
1334 reta |= reta_conf[idx].reta[shift + j] <<
1338 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
/*
 * eth_dev_ops reta_query callback: read back the RSS redirection table.
 * Mirrors fm10k_reta_update(): 128 one-byte entries packed four per
 * 32-bit FM10K_RETA register.
 * NOTE(review): error-return statements between the visible lines are
 * elided in this excerpt.
 */
1345 fm10k_reta_query(struct rte_eth_dev *dev,
1346 struct rte_eth_rss_reta_entry64 *reta_conf,
1349 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1350 uint16_t i, j, idx, shift;
1354 PMD_INIT_FUNC_TRACE();
/* Caller's table must be able to hold all 128 hardware entries. */
1356 if (reta_size < FM10K_MAX_RSS_INDICES) {
1357 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1358 "(%d) doesn't match the number hardware can supported "
1359 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1364 * Read Redirection Table RETA[n], n=0..31. The redirection table has
1365 * 128-entries in 32 registers
1367 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1368 idx = i / RTE_RETA_GROUP_SIZE;
1369 shift = i % RTE_RETA_GROUP_SIZE;
/* Low 4 mask bits select which of the register's 4 bytes to report. */
1370 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1371 BIT_MASK_PER_UINT32);
1375 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1376 for (j = 0; j < CHARS_PER_UINT32; j++) {
1377 if (mask & (0x1 << j))
/* Extract byte j of the register into the caller's table. */
1378 reta_conf[idx].reta[shift + j] = ((reta >>
1379 CHAR_BIT * j) & UINT8_MAX);
/*
 * eth_dev_ops rss_hash_update callback: write the RSS hash key into the
 * FM10K_RSSRK registers and translate the DPDK ETH_RSS_* flag set into
 * the hardware MRQC bit mask.
 * NOTE(review): return statements between the visible lines (including
 * the early-return on a too-short key) are elided in this excerpt.
 */
1387 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1388 struct rte_eth_rss_conf *rss_conf)
1390 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1391 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1393 uint64_t hf = rss_conf->rss_hf;
1396 PMD_INIT_FUNC_TRACE();
/* Key must cover all FM10K_RSSRK_SIZE registers (4 bytes each). */
1398 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1399 FM10K_RSSRK_ENTRIES_PER_REG)
/* Map each requested DPDK hash flag onto its MRQC capability bit. */
1406 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
1407 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
1408 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
1409 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
1410 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
1411 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
1412 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
1413 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
1414 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
1416 /* If the mapping doesn't fit any supported, return */
/* Program the hash key one 32-bit register at a time, then enable. */
1421 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1422 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1424 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
/*
 * eth_dev_ops rss_hash_conf_get callback: read back the RSS hash key
 * from the FM10K_RSSRK registers and convert the MRQC bit mask into the
 * equivalent DPDK ETH_RSS_* flag set (inverse of fm10k_rss_hash_update).
 * NOTE(review): return statements between the visible lines are elided
 * in this excerpt.
 */
1430 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1431 struct rte_eth_rss_conf *rss_conf)
1433 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1434 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1439 PMD_INIT_FUNC_TRACE();
/* Caller's key buffer must be large enough for the full 40-byte key. */
1441 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1442 FM10K_RSSRK_ENTRIES_PER_REG)
1446 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1447 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1449 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
/*
 * One MRQC bit can correspond to several DPDK flags (e.g. IPV6 also
 * implies IPV6_EX), so each hardware bit may set more than one flag.
 */
1451 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
1452 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
1453 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
1454 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
1455 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
1456 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
1457 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
1458 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
1459 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
1461 rss_conf->rss_hf = hf;
/*
 * Enable PF miscellaneous (non-queue) interrupts: map every local
 * interrupt cause to vector 0, unmask the misc causes in EIMR, and clear
 * the ITR mask so vector 0 can fire. Flushed at the end so all writes
 * reach the device before interrupts are relied upon.
 */
1467 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
1469 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1470 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1472 /* Bind all local non-queue interrupt to vector 0 */
1475 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1476 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1477 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1478 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1479 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1480 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1482 /* Enable misc causes */
1483 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
1484 FM10K_EIMR_ENABLE(THI_FAULT) |
1485 FM10K_EIMR_ENABLE(FUM_FAULT) |
1486 FM10K_EIMR_ENABLE(MAILBOX) |
1487 FM10K_EIMR_ENABLE(SWITCHREADY) |
1488 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
1489 FM10K_EIMR_ENABLE(SRAMERROR) |
1490 FM10K_EIMR_ENABLE(VFLR));
/* Un-mask vector 0 with auto-mask on delivery. */
1493 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1494 FM10K_ITR_MASK_CLEAR);
1495 FM10K_WRITE_FLUSH(hw);
/*
 * Enable VF miscellaneous interrupts: VF counterpart of
 * fm10k_dev_enable_intr_pf(). Maps the single available vector and
 * clears the ITR mask, then flushes the writes.
 */
1499 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
1501 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1502 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1504 /* Bind all local non-queue interrupt to vector 0 */
1507 /* Only INT 0 available, other 15 are reserved. */
1508 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
/* Un-mask vector 0 with auto-mask on delivery. */
1511 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1512 FM10K_ITR_MASK_CLEAR);
1513 FM10K_WRITE_FLUSH(hw);
/*
 * Decode and log hardware fault events signalled in the EICR value.
 * For each fault class flagged in @eicr (PCA, THI, FUM) the fault record
 * is fetched via fm10k_get_fault() and its type is translated to a
 * human-readable string for the error log; no recovery is attempted here.
 * NOTE(review): error-path lines between the visible lines are elided in
 * this excerpt.
 */
1517 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
1519 struct fm10k_fault fault;
1521 const char *estr = "Unknown error";
1523 /* Process PCA fault */
1524 if (eicr & FM10K_EIMR_PCA_FAULT) {
1525 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
1528 switch (fault.type) {
1530 estr = "PCA_NO_FAULT"; break;
1531 case PCA_UNMAPPED_ADDR:
1532 estr = "PCA_UNMAPPED_ADDR"; break;
1533 case PCA_BAD_QACCESS_PF:
1534 estr = "PCA_BAD_QACCESS_PF"; break;
1535 case PCA_BAD_QACCESS_VF:
1536 estr = "PCA_BAD_QACCESS_VF"; break;
1537 case PCA_MALICIOUS_REQ:
1538 estr = "PCA_MALICIOUS_REQ"; break;
1539 case PCA_POISONED_TLP:
1540 estr = "PCA_POISONED_TLP"; break;
1542 estr = "PCA_TLP_ABORT"; break;
/* fault.func is non-zero for VF-originated faults, zero for PF. */
1546 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1547 estr, fault.func ? "VF" : "PF", fault.func,
1548 fault.address, fault.specinfo);
1551 /* Process THI fault */
1552 if (eicr & FM10K_EIMR_THI_FAULT) {
1553 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
1556 switch (fault.type) {
1558 estr = "THI_NO_FAULT"; break;
1559 case THI_MAL_DIS_Q_FAULT:
1560 estr = "THI_MAL_DIS_Q_FAULT"; break;
1564 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1565 estr, fault.func ? "VF" : "PF", fault.func,
1566 fault.address, fault.specinfo);
1569 /* Process FUM fault */
1570 if (eicr & FM10K_EIMR_FUM_FAULT) {
1571 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
1574 switch (fault.type) {
1576 estr = "FUM_NO_FAULT"; break;
1577 case FUM_UNMAPPED_ADDR:
1578 estr = "FUM_UNMAPPED_ADDR"; break;
1579 case FUM_POISONED_TLP:
1580 estr = "FUM_POISONED_TLP"; break;
1581 case FUM_BAD_VF_QACCESS:
1582 estr = "FUM_BAD_VF_QACCESS"; break;
1583 case FUM_ADD_DECODE_ERR:
1584 estr = "FUM_ADD_DECODE_ERR"; break;
1586 estr = "FUM_RO_ERROR"; break;
1587 case FUM_QPRC_CRC_ERROR:
1588 estr = "FUM_QPRC_CRC_ERROR"; break;
1589 case FUM_CSR_TIMEOUT:
1590 estr = "FUM_CSR_TIMEOUT"; break;
1591 case FUM_INVALID_TYPE:
1592 estr = "FUM_INVALID_TYPE"; break;
1593 case FUM_INVALID_LENGTH:
1594 estr = "FUM_INVALID_LENGTH"; break;
1595 case FUM_INVALID_BE:
1596 estr = "FUM_INVALID_BE"; break;
1597 case FUM_INVALID_ALIGN:
1598 estr = "FUM_INVALID_ALIGN"; break;
1602 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1603 estr, fault.func ? "VF" : "PF", fault.func,
1604 fault.address, fault.specinfo);
1611 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
1616 * PF interrupt handler triggered by NIC for handling specific interrupt.
1619 * Pointer to interrupt handle.
1621 * The address of parameter (struct rte_eth_dev *) registered before.
1627 fm10k_dev_interrupt_handler_pf(
1628 __rte_unused struct rte_intr_handle *handle,
1631 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1632 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1633 uint32_t cause, status;
/* Guard: this handler services PF devices only. */
1635 if (hw->mac.type != fm10k_mac_pf)
/* Read the cause register to see which events are pending. */
1638 cause = FM10K_READ_REG(hw, FM10K_EICR);
1640 /* Handle PCI fault cases */
1641 if (cause & FM10K_EICR_FAULT_MASK) {
1642 PMD_INIT_LOG(ERR, "INT: find fault!");
1643 fm10k_dev_handle_fault(hw, cause);
1646 /* Handle switch up/down */
1647 if (cause & FM10K_EICR_SWITCHNOTREADY)
1648 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
1650 if (cause & FM10K_EICR_SWITCHREADY)
1651 PMD_INIT_LOG(INFO, "INT: Switch is ready");
1653 /* Handle mailbox message */
/* Mailbox lock acquisition is on an elided line just above this call. */
1655 hw->mbx.ops.process(hw, &hw->mbx);
1656 fm10k_mbx_unlock(hw);
1658 /* Handle SRAM error */
1659 if (cause & FM10K_EICR_SRAMERROR) {
1660 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
1662 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
1663 /* Write to clear pending bits */
1664 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
1666 /* Todo: print out error message after shared code updates */
1669 /* Clear these 3 events if having any */
1670 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
1671 FM10K_EICR_SWITCHREADY;
/* Write-1-to-clear only the handled bits back to EICR. */
1673 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
1675 /* Re-enable interrupt from device side */
1676 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1677 FM10K_ITR_MASK_CLEAR);
1678 /* Re-enable interrupt from host side */
1679 rte_intr_enable(&(dev->pci_dev->intr_handle));
1683 * VF interrupt handler triggered by NIC for handling specific interrupt.
1686 * Pointer to interrupt handle.
1688 * The address of parameter (struct rte_eth_dev *) registered before.
1694 fm10k_dev_interrupt_handler_vf(
1695 __rte_unused struct rte_intr_handle *handle,
1698 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1699 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Guard: this handler services VF devices only. */
1701 if (hw->mac.type != fm10k_mac_vf)
1704 /* Handle mailbox message if lock is acquired */
1706 hw->mbx.ops.process(hw, &hw->mbx);
1707 fm10k_mbx_unlock(hw);
1709 /* Re-enable interrupt from device side */
1710 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1711 FM10K_ITR_MASK_CLEAR);
1712 /* Re-enable interrupt from host side */
1713 rte_intr_enable(&(dev->pci_dev->intr_handle));
1716 /* Mailbox message handler in VF */
/*
 * Dispatch table registered with the VF mailbox (see
 * fm10k_setup_mbx_service); maps message IDs to shared-code handlers.
 */
1717 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
1718 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1719 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
1720 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
1721 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1724 /* Mailbox message handler in PF */
/*
 * Dispatch table registered with the PF mailbox (see
 * fm10k_setup_mbx_service); maps message IDs to shared-code handlers.
 */
1725 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
1726 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1727 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1728 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1729 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1730 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1731 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1732 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
/*
 * Initialize the mailbox: set up its lock, install the PF- or VF-specific
 * message handler table, and connect (PF connects to the switch manager,
 * VF connects to its PF). Returns the connect result; the error-return
 * between the visible lines is elided in this excerpt.
 */
1736 fm10k_setup_mbx_service(struct fm10k_hw *hw)
1740 /* Initialize mailbox lock */
1741 fm10k_mbx_initlock(hw);
1743 /* Replace default message handler with new ones */
1744 if (hw->mac.type == fm10k_mac_pf)
1745 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
1747 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
1750 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
1754 /* Connect to SM for PF device or PF for VF device */
1755 return hw->mbx.ops.connect(hw, &hw->mbx);
/* Tear down the mailbox connection established by fm10k_setup_mbx_service(). */
1759 fm10k_close_mbx_service(struct fm10k_hw *hw)
1761 /* Disconnect from SM for PF device or PF for VF device */
1762 hw->mbx.ops.disconnect(hw, &hw->mbx);
/*
 * eth_dev_ops table wired into every fm10k port in eth_fm10k_dev_init();
 * maps the generic rte_ethdev callbacks onto this PMD's implementations.
 */
1765 static const struct eth_dev_ops fm10k_eth_dev_ops = {
1766 .dev_configure = fm10k_dev_configure,
1767 .dev_start = fm10k_dev_start,
1768 .dev_stop = fm10k_dev_stop,
1769 .dev_close = fm10k_dev_close,
1770 .promiscuous_enable = fm10k_dev_promiscuous_enable,
1771 .promiscuous_disable = fm10k_dev_promiscuous_disable,
1772 .allmulticast_enable = fm10k_dev_allmulticast_enable,
1773 .allmulticast_disable = fm10k_dev_allmulticast_disable,
1774 .stats_get = fm10k_stats_get,
1775 .stats_reset = fm10k_stats_reset,
1776 .link_update = fm10k_link_update,
1777 .dev_infos_get = fm10k_dev_infos_get,
1778 .vlan_filter_set = fm10k_vlan_filter_set,
1779 .rx_queue_start = fm10k_dev_rx_queue_start,
1780 .rx_queue_stop = fm10k_dev_rx_queue_stop,
1781 .tx_queue_start = fm10k_dev_tx_queue_start,
1782 .tx_queue_stop = fm10k_dev_tx_queue_stop,
1783 .rx_queue_setup = fm10k_rx_queue_setup,
1784 .rx_queue_release = fm10k_rx_queue_release,
1785 .tx_queue_setup = fm10k_tx_queue_setup,
1786 .tx_queue_release = fm10k_tx_queue_release,
1787 .reta_update = fm10k_reta_update,
1788 .reta_query = fm10k_reta_query,
1789 .rss_hash_update = fm10k_rss_hash_update,
1790 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
/*
 * Per-device init callback invoked by the EAL for each matched fm10k PCI
 * device: installs ops and burst functions, initializes the shared code
 * and hardware, reads/sets the MAC address, sets up the mailbox and
 * PF/VF interrupt handling, then enables the port via the mailbox.
 * NOTE(review): error-return lines between the visible lines are elided
 * in this excerpt.
 */
1794 eth_fm10k_dev_init(struct rte_eth_dev *dev)
1796 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1799 PMD_INIT_FUNC_TRACE();
1801 dev->dev_ops = &fm10k_eth_dev_ops;
1802 dev->rx_pkt_burst = &fm10k_recv_pkts;
1803 dev->tx_pkt_burst = &fm10k_xmit_pkts;
/* Scattered Rx needs the multi-segment receive path. */
1805 if (dev->data->scattered_rx)
1806 dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
1808 /* only initialize in the primary process */
1809 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1812 /* Vendor and Device ID need to be set before init of shared code */
1813 memset(hw, 0, sizeof(*hw));
1814 hw->device_id = dev->pci_dev->id.device_id;
1815 hw->vendor_id = dev->pci_dev->id.vendor_id;
1816 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
1817 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
1818 hw->revision_id = 0;
/* BAR 0 holds the device CSRs used by the FM10K_*_REG accessors. */
1819 hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
1820 if (hw->hw_addr == NULL) {
1821 PMD_INIT_LOG(ERR, "Bad mem resource."
1822 " Try to blacklist unused devices.");
1826 /* Store fm10k_adapter pointer */
1827 hw->back = dev->data->dev_private;
1829 /* Initialize the shared code */
1830 diag = fm10k_init_shared_code(hw);
1831 if (diag != FM10K_SUCCESS) {
1832 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1837 * Initialize bus info. Normally we would call fm10k_get_bus_info(), but
1838 * there is no way to get link status without reading BAR4. Until this
1839 * works, assume we have maximum bandwidth.
1840 * @todo - fix bus info
1842 hw->bus_caps.speed = fm10k_bus_speed_8000;
1843 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
1844 hw->bus_caps.payload = fm10k_bus_payload_512;
1845 hw->bus.speed = fm10k_bus_speed_8000;
1846 hw->bus.width = fm10k_bus_width_pcie_x8;
1847 hw->bus.payload = fm10k_bus_payload_256;
1849 /* Initialize the hw */
1850 diag = fm10k_init_hw(hw);
1851 if (diag != FM10K_SUCCESS) {
1852 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1856 /* Initialize MAC address(es) */
1857 dev->data->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
1858 if (dev->data->mac_addrs == NULL) {
1859 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
1863 diag = fm10k_read_mac_addr(hw);
1864 if (diag != FM10K_SUCCESS) {
1866 * TODO: remove special handling on VF. Need shared code to
1869 if (hw->mac.type == fm10k_mac_pf) {
1870 PMD_INIT_LOG(ERR, "Read MAC addr failed: %d", diag);
/* VF without an assigned MAC falls back to a random address. */
1873 /* Generate a random addr */
1874 eth_random_addr(hw->mac.addr);
1875 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
/* Publish the (read or generated) MAC as the port's address 0. */
1879 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
1880 &dev->data->mac_addrs[0]);
1882 /* Reset the hw statistics */
1883 fm10k_stats_reset(dev);
1886 diag = fm10k_reset_hw(hw);
1887 if (diag != FM10K_SUCCESS) {
1888 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
1892 /* Setup mailbox service */
1893 diag = fm10k_setup_mbx_service(hw);
1894 if (diag != FM10K_SUCCESS) {
1895 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
1899 /*PF/VF has different interrupt handling mechanism */
1900 if (hw->mac.type == fm10k_mac_pf) {
1901 /* register callback func to eal lib */
1902 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
1903 fm10k_dev_interrupt_handler_pf, (void *)dev);
1905 /* enable MISC interrupt */
1906 fm10k_dev_enable_intr_pf(dev);
1908 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
1909 fm10k_dev_interrupt_handler_vf, (void *)dev);
1911 fm10k_dev_enable_intr_vf(dev);
1915 * Below function will trigger operations on mailbox, acquire lock to
1916 * avoid race condition from interrupt handler. Operations on mailbox
1917 * FIFO will trigger interrupt to PF/SM, in which interrupt handler
1918 * will handle and generate an interrupt to our side. Then, FIFO in
1919 * mailbox will be touched.
1922 /* Enable port first */
1923 hw->mac.ops.update_lport_state(hw, 0, 0, 1);
1925 /* Update default vlan */
1926 hw->mac.ops.update_vlan(hw, hw->mac.default_vid, 0, true);
1929 * Add default mac/vlan filter. glort is assigned by SM for PF, while is
1930 * unused for VF. PF will assign correct glort for VF.
1932 hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
1933 hw->mac.default_vid, 1, 0);
1935 /* Set unicast mode by default. App can change to other mode in other
1938 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1939 FM10K_XCAST_MODE_NONE);
1941 fm10k_mbx_unlock(hw);
1943 /* enable uio intr after callback registered */
1944 rte_intr_enable(&(dev->pci_dev->intr_handle));
1950 * The set of PCI devices this driver supports. This driver will enable both PF
1951 * and SRIOV-VF devices.
/*
 * PCI ID table: PF and VF device IDs are pulled in from
 * rte_pci_dev_ids.h via the two DECL macros; the zeroed entry is the
 * end-of-table sentinel required by the EAL PCI scan.
 */
1953 static const struct rte_pci_id pci_id_fm10k_map[] = {
1954 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
1955 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
1956 #include "rte_pci_dev_ids.h"
1957 { .vendor_id = 0, /* sentinel */ },
/*
 * Ethernet driver descriptor: ties the PCI ID table to the per-device
 * init routine and the size of the per-port private data.
 */
1960 static struct eth_driver rte_pmd_fm10k = {
1962 .name = "rte_pmd_fm10k",
1963 .id_table = pci_id_fm10k_map,
1964 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1966 .eth_dev_init = eth_fm10k_dev_init,
1967 .dev_private_size = sizeof(struct fm10k_adapter),
1971 * Driver initialization routine.
1972 * Invoked once at EAL init time.
1973 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
1976 rte_pmd_fm10k_init(__rte_unused const char *name,
1977 __rte_unused const char *params)
1979 PMD_INIT_FUNC_TRACE();
/* Hand the driver descriptor to the ethdev layer for PCI probing. */
1980 rte_eth_driver_register(&rte_pmd_fm10k);
/* EAL driver hook: runs rte_pmd_fm10k_init() during EAL initialization. */
1984 static struct rte_driver rte_fm10k_driver = {
1986 .init = rte_pmd_fm10k_init,
1989 PMD_REGISTER_DRIVER(rte_fm10k_driver);