/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"
/* Hardware requires Rx packet buffers to be 512B aligned (or 8B aligned
 * without crossing a 4K boundary) — see mempool_element_size_valid().
 */
#define FM10K_RX_BUFF_ALIGN 512
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
/* Mask selecting the low half of a 64-bit DMA address */
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
/* One bit per byte-lane of a uint32 (0xF) — used when masking RETA entries */
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
/* Forward declarations for functions defined later in this file. */
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
61 fm10k_mbx_initlock(struct fm10k_hw *hw)
63 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
67 fm10k_mbx_lock(struct fm10k_hw *hw)
69 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
70 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
74 fm10k_mbx_unlock(struct fm10k_hw *hw)
76 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
80 * reset queue to initial state, allocate software buffers used when starting
83 * return -ENOMEM if buffers cannot be allocated
84 * return -EINVAL if buffers do not satisfy alignment condition
87 rx_queue_reset(struct fm10k_rx_queue *q)
91 PMD_INIT_FUNC_TRACE();
93 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
97 for (i = 0; i < q->nb_desc; ++i) {
98 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
99 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
100 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
104 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
105 q->hw_ring[i].q.pkt_addr = dma_addr;
106 q->hw_ring[i].q.hdr_addr = dma_addr;
111 q->next_trigger = q->alloc_thresh - 1;
112 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
117 * clean queue, descriptor rings, free software buffers used when stopping
121 rx_queue_clean(struct fm10k_rx_queue *q)
123 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
125 PMD_INIT_FUNC_TRACE();
127 /* zero descriptor rings */
128 for (i = 0; i < q->nb_desc; ++i)
129 q->hw_ring[i] = zero;
131 /* free software buffers */
132 for (i = 0; i < q->nb_desc; ++i) {
134 rte_pktmbuf_free_seg(q->sw_ring[i]);
135 q->sw_ring[i] = NULL;
141 * free all queue memory used when releasing the queue (i.e. configure)
144 rx_queue_free(struct fm10k_rx_queue *q)
146 PMD_INIT_FUNC_TRACE();
148 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
151 rte_free(q->sw_ring);
160 * disable RX queue, wait unitl HW finished necessary flush operation
163 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
167 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
168 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
169 reg & ~FM10K_RXQCTL_ENABLE);
171 /* Wait 100us at most */
172 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
174 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
175 if (!(reg & FM10K_RXQCTL_ENABLE))
179 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
186 * reset queue to initial state, allocate software buffers used when starting
190 tx_queue_reset(struct fm10k_tx_queue *q)
192 PMD_INIT_FUNC_TRACE();
196 q->nb_free = q->nb_desc - 1;
197 q->free_trigger = q->nb_free - q->free_thresh;
198 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
199 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
203 * clean queue, descriptor rings, free software buffers used when stopping
207 tx_queue_clean(struct fm10k_tx_queue *q)
209 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
211 PMD_INIT_FUNC_TRACE();
213 /* zero descriptor rings */
214 for (i = 0; i < q->nb_desc; ++i)
215 q->hw_ring[i] = zero;
217 /* free software buffers */
218 for (i = 0; i < q->nb_desc; ++i) {
220 rte_pktmbuf_free_seg(q->sw_ring[i]);
221 q->sw_ring[i] = NULL;
227 * free all queue memory used when releasing the queue (i.e. configure)
230 tx_queue_free(struct fm10k_tx_queue *q)
232 PMD_INIT_FUNC_TRACE();
234 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
236 if (q->rs_tracker.list) {
237 rte_free(q->rs_tracker.list);
238 q->rs_tracker.list = NULL;
241 rte_free(q->sw_ring);
250 * disable TX queue, wait unitl HW finished necessary flush operation
253 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
257 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
258 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
259 reg & ~FM10K_TXDCTL_ENABLE);
261 /* Wait 100us at most */
262 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
264 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
265 if (!(reg & FM10K_TXDCTL_ENABLE))
269 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
276 fm10k_dev_configure(struct rte_eth_dev *dev)
278 PMD_INIT_FUNC_TRACE();
280 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
281 PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
287 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
289 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
290 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
291 uint32_t mrqc, *key, i, reta, j;
294 #define RSS_KEY_SIZE 40
295 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
296 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
297 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
298 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
299 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
300 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
303 if (dev->data->nb_rx_queues == 1 ||
304 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
305 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
308 /* random key is rss_intel_key (default) or user provided (rss_key) */
309 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
310 key = (uint32_t *)rss_intel_key;
312 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
314 /* Now fill our hash function seeds, 4 bytes at a time */
315 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
316 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
319 * Fill in redirection table
320 * The byte-swap is needed because NIC registers are in
321 * little-endian order.
324 for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
325 if (j == dev->data->nb_rx_queues)
327 reta = (reta << CHAR_BIT) | j;
329 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
334 * Generate RSS hash based on packet types, TCP/UDP
335 * port numbers and/or IPv4/v6 src and dst addresses
337 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
339 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
340 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
341 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
342 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
343 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
344 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
345 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
346 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
347 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
350 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
355 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
359 fm10k_dev_tx_init(struct rte_eth_dev *dev)
361 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
363 struct fm10k_tx_queue *txq;
367 /* Disable TXINT to avoid possible interrupt */
368 for (i = 0; i < hw->mac.max_queues; i++)
369 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
370 3 << FM10K_TXINT_TIMER_SHIFT);
373 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
374 txq = dev->data->tx_queues[i];
375 base_addr = txq->hw_ring_phys_addr;
376 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
378 /* disable queue to avoid issues while updating state */
379 ret = tx_queue_disable(hw, i);
381 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
385 /* set location and size for descriptor ring */
386 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
387 base_addr & UINT64_LOWER_32BITS_MASK);
388 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
389 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
390 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
396 fm10k_dev_rx_init(struct rte_eth_dev *dev)
398 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
400 struct fm10k_rx_queue *rxq;
403 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
406 /* Disable RXINT to avoid possible interrupt */
407 for (i = 0; i < hw->mac.max_queues; i++)
408 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
409 3 << FM10K_RXINT_TIMER_SHIFT);
411 /* Setup RX queues */
412 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
413 rxq = dev->data->rx_queues[i];
414 base_addr = rxq->hw_ring_phys_addr;
415 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
417 /* disable queue to avoid issues while updating state */
418 ret = rx_queue_disable(hw, i);
420 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
424 /* Setup the Base and Length of the Rx Descriptor Ring */
425 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
426 base_addr & UINT64_LOWER_32BITS_MASK);
427 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
428 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
429 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
431 /* Configure the Rx buffer size for one buff without split */
432 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
433 RTE_PKTMBUF_HEADROOM);
434 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
435 buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
437 /* It adds dual VLAN length for supporting dual VLAN */
438 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
439 2 * FM10K_VLAN_TAG_SIZE) > buf_size){
440 dev->data->scattered_rx = 1;
441 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
444 /* Enable drop on empty, it's RO for VF */
445 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
446 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
448 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
449 FM10K_WRITE_FLUSH(hw);
452 if (dev->data->dev_conf.rxmode.enable_scatter) {
453 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
454 dev->data->scattered_rx = 1;
457 /* Configure RSS if applicable */
458 fm10k_dev_mq_rx_configure(dev);
463 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
465 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
468 struct fm10k_rx_queue *rxq;
470 PMD_INIT_FUNC_TRACE();
472 if (rx_queue_id < dev->data->nb_rx_queues) {
473 rxq = dev->data->rx_queues[rx_queue_id];
474 err = rx_queue_reset(rxq);
475 if (err == -ENOMEM) {
476 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
478 } else if (err == -EINVAL) {
479 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
484 /* Setup the HW Rx Head and Tail Descriptor Pointers
485 * Note: this must be done AFTER the queue is enabled on real
486 * hardware, but BEFORE the queue is enabled when using the
487 * emulation platform. Do it in both places for now and remove
488 * this comment and the following two register writes when the
489 * emulation platform is no longer being used.
491 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
492 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
494 /* Set PF ownership flag for PF devices */
495 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
496 if (hw->mac.type == fm10k_mac_pf)
497 reg |= FM10K_RXQCTL_PF;
498 reg |= FM10K_RXQCTL_ENABLE;
499 /* enable RX queue */
500 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
501 FM10K_WRITE_FLUSH(hw);
503 /* Setup the HW Rx Head and Tail Descriptor Pointers
504 * Note: this must be done AFTER the queue is enabled
506 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
507 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
514 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
516 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
518 PMD_INIT_FUNC_TRACE();
520 if (rx_queue_id < dev->data->nb_rx_queues) {
521 /* Disable RX queue */
522 rx_queue_disable(hw, rx_queue_id);
524 /* Free mbuf and clean HW ring */
525 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
532 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
534 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
535 /** @todo - this should be defined in the shared code */
536 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
537 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
540 PMD_INIT_FUNC_TRACE();
542 if (tx_queue_id < dev->data->nb_tx_queues) {
543 tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
545 /* reset head and tail pointers */
546 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
547 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
549 /* enable TX queue */
550 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
551 FM10K_TXDCTL_ENABLE | txdctl);
552 FM10K_WRITE_FLUSH(hw);
560 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
562 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
564 PMD_INIT_FUNC_TRACE();
566 if (tx_queue_id < dev->data->nb_tx_queues) {
567 tx_queue_disable(hw, tx_queue_id);
568 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
574 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
576 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
577 != FM10K_DGLORTMAP_NONE);
581 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
583 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
586 PMD_INIT_FUNC_TRACE();
588 /* Return if it didn't acquire valid glort range */
589 if (!fm10k_glort_valid(hw))
593 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
594 FM10K_XCAST_MODE_PROMISC);
595 fm10k_mbx_unlock(hw);
597 if (status != FM10K_SUCCESS)
598 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
602 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
604 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
608 PMD_INIT_FUNC_TRACE();
610 /* Return if it didn't acquire valid glort range */
611 if (!fm10k_glort_valid(hw))
614 if (dev->data->all_multicast == 1)
615 mode = FM10K_XCAST_MODE_ALLMULTI;
617 mode = FM10K_XCAST_MODE_NONE;
620 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
622 fm10k_mbx_unlock(hw);
624 if (status != FM10K_SUCCESS)
625 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
629 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
631 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
634 PMD_INIT_FUNC_TRACE();
636 /* Return if it didn't acquire valid glort range */
637 if (!fm10k_glort_valid(hw))
640 /* If promiscuous mode is enabled, it doesn't make sense to enable
641 * allmulticast and disable promiscuous since fm10k only can select
644 if (dev->data->promiscuous) {
645 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
646 "needn't enable allmulticast");
651 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
652 FM10K_XCAST_MODE_ALLMULTI);
653 fm10k_mbx_unlock(hw);
655 if (status != FM10K_SUCCESS)
656 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
660 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
662 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
665 PMD_INIT_FUNC_TRACE();
667 /* Return if it didn't acquire valid glort range */
668 if (!fm10k_glort_valid(hw))
671 if (dev->data->promiscuous) {
672 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
673 "since promisc mode is enabled");
678 /* Change mode to unicast mode */
679 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
680 FM10K_XCAST_MODE_NONE);
681 fm10k_mbx_unlock(hw);
683 if (status != FM10K_SUCCESS)
684 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
/* fls = find last set bit = 32 minus the number of leading zeros.
 * fls(0) is defined as 0; fls(1) == 1, fls(0x80000000) == 32.
 */
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))

/* Round-up mask for the SRRCTL buffer-size granularity */
#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
693 fm10k_dev_start(struct rte_eth_dev *dev)
695 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
698 PMD_INIT_FUNC_TRACE();
700 /* stop, init, then start the hw */
701 diag = fm10k_stop_hw(hw);
702 if (diag != FM10K_SUCCESS) {
703 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
707 diag = fm10k_init_hw(hw);
708 if (diag != FM10K_SUCCESS) {
709 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
713 diag = fm10k_start_hw(hw);
714 if (diag != FM10K_SUCCESS) {
715 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
719 diag = fm10k_dev_tx_init(dev);
721 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
725 diag = fm10k_dev_rx_init(dev);
727 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
731 if (hw->mac.type == fm10k_mac_pf) {
732 /* Establish only VSI 0 as valid */
733 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
735 /* Configure RSS bits used in RETA table */
736 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
737 fls(dev->data->nb_rx_queues - 1) <<
738 FM10K_DGLORTDEC_RSSLENGTH_SHIFT);
740 /* Invalidate all other GLORT entries */
741 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
742 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
743 FM10K_DGLORTMAP_NONE);
746 for (i = 0; i < dev->data->nb_rx_queues; i++) {
747 struct fm10k_rx_queue *rxq;
748 rxq = dev->data->rx_queues[i];
750 if (rxq->rx_deferred_start)
752 diag = fm10k_dev_rx_queue_start(dev, i);
755 for (j = 0; j < i; ++j)
756 rx_queue_clean(dev->data->rx_queues[j]);
761 for (i = 0; i < dev->data->nb_tx_queues; i++) {
762 struct fm10k_tx_queue *txq;
763 txq = dev->data->tx_queues[i];
765 if (txq->tx_deferred_start)
767 diag = fm10k_dev_tx_queue_start(dev, i);
770 for (j = 0; j < dev->data->nb_rx_queues; ++j)
771 rx_queue_clean(dev->data->rx_queues[j]);
780 fm10k_dev_stop(struct rte_eth_dev *dev)
784 PMD_INIT_FUNC_TRACE();
786 for (i = 0; i < dev->data->nb_tx_queues; i++)
787 fm10k_dev_tx_queue_stop(dev, i);
789 for (i = 0; i < dev->data->nb_rx_queues; i++)
790 fm10k_dev_rx_queue_stop(dev, i);
794 fm10k_dev_close(struct rte_eth_dev *dev)
796 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
798 PMD_INIT_FUNC_TRACE();
800 /* Stop mailbox service first */
801 fm10k_close_mbx_service(hw);
807 fm10k_link_update(struct rte_eth_dev *dev,
808 __rte_unused int wait_to_complete)
810 PMD_INIT_FUNC_TRACE();
812 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
813 * x8 PCIe interface. For now, we leave the speed undefined since there
814 * is no 50Gbps Ethernet. */
815 dev->data->dev_link.link_speed = 0;
816 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
817 dev->data->dev_link.link_status = 1;
823 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
825 uint64_t ipackets, opackets, ibytes, obytes;
826 struct fm10k_hw *hw =
827 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
828 struct fm10k_hw_stats *hw_stats =
829 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
832 PMD_INIT_FUNC_TRACE();
834 fm10k_update_hw_stats(hw, hw_stats);
836 ipackets = opackets = ibytes = obytes = 0;
837 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
838 (i < FM10K_MAX_QUEUES_PF); ++i) {
839 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
840 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
841 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
842 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
843 ipackets += stats->q_ipackets[i];
844 opackets += stats->q_opackets[i];
845 ibytes += stats->q_ibytes[i];
846 obytes += stats->q_obytes[i];
848 stats->ipackets = ipackets;
849 stats->opackets = opackets;
850 stats->ibytes = ibytes;
851 stats->obytes = obytes;
855 fm10k_stats_reset(struct rte_eth_dev *dev)
857 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
858 struct fm10k_hw_stats *hw_stats =
859 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
861 PMD_INIT_FUNC_TRACE();
863 memset(hw_stats, 0, sizeof(*hw_stats));
864 fm10k_rebind_hw_stats(hw, hw_stats);
868 fm10k_dev_infos_get(struct rte_eth_dev *dev,
869 struct rte_eth_dev_info *dev_info)
871 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
873 PMD_INIT_FUNC_TRACE();
875 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
876 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
877 dev_info->max_rx_queues = hw->mac.max_queues;
878 dev_info->max_tx_queues = hw->mac.max_queues;
879 dev_info->max_mac_addrs = 1;
880 dev_info->max_hash_mac_addrs = 0;
881 dev_info->max_vfs = dev->pci_dev->max_vfs;
882 dev_info->max_vmdq_pools = ETH_64_POOLS;
883 dev_info->rx_offload_capa =
884 DEV_RX_OFFLOAD_IPV4_CKSUM |
885 DEV_RX_OFFLOAD_UDP_CKSUM |
886 DEV_RX_OFFLOAD_TCP_CKSUM;
887 dev_info->tx_offload_capa = 0;
888 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
890 dev_info->default_rxconf = (struct rte_eth_rxconf) {
892 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
893 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
894 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
896 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
900 dev_info->default_txconf = (struct rte_eth_txconf) {
902 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
903 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
904 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
906 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
907 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
908 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
909 ETH_TXQ_FLAGS_NOOFFLOADS,
915 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
917 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
919 PMD_INIT_FUNC_TRACE();
921 /* @todo - add support for the VF */
922 if (hw->mac.type != fm10k_mac_pf)
925 return fm10k_update_vlan(hw, vlan_id, 0, on);
/* Validate a requested descriptor-ring size: it must lie in [min, max]
 * and be a multiple of mult. Returns 0 when valid, -1 otherwise.
 */
static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
	if ((request < min) || (request > max) || ((request % mult) != 0))
		return -1;
	else
		return 0;
}
938 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
939 * the physical address is required. If the memzone is already created, then
940 * this function returns a pointer to the existing memzone.
942 static inline const struct rte_memzone *
943 allocate_hw_ring(const char *driver_name, const char *ring_name,
944 uint8_t port_id, uint16_t queue_id, int socket_id,
945 uint32_t size, uint32_t align)
947 char name[RTE_MEMZONE_NAMESIZE];
948 const struct rte_memzone *mz;
950 snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
951 driver_name, ring_name, port_id, queue_id, socket_id);
953 /* return the memzone if it already exists */
954 mz = rte_memzone_lookup(name);
958 #ifdef RTE_LIBRTE_XEN_DOM0
959 return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
962 return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
/* Validate a requested threshold: it must lie in [min, max] and evenly
 * divide div. Returns 0 when valid, -1 otherwise. A zero request is
 * rejected explicitly before the modulo to avoid division by zero when
 * min == 0.
 */
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
	if ((request == 0) || (request < min) || (request > max) ||
			((div % request) != 0))
		return -1;
	else
		return 0;
}
976 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
978 uint16_t rx_free_thresh;
980 if (conf->rx_free_thresh == 0)
981 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
983 rx_free_thresh = conf->rx_free_thresh;
985 /* make sure the requested threshold satisfies the constraints */
986 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
987 FM10K_RX_FREE_THRESH_MAX(q),
988 FM10K_RX_FREE_THRESH_DIV(q),
990 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
991 "less than or equal to %u, "
992 "greater than or equal to %u, "
993 "and a divisor of %u",
994 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
995 FM10K_RX_FREE_THRESH_MIN(q),
996 FM10K_RX_FREE_THRESH_DIV(q));
1000 q->alloc_thresh = rx_free_thresh;
1001 q->drop_en = conf->rx_drop_en;
1002 q->rx_deferred_start = conf->rx_deferred_start;
1008 * Hardware requires specific alignment for Rx packet buffers. At
1009 * least one of the following two conditions must be satisfied.
1010 * 1. Address is 512B aligned
1011 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1013 * As such, the driver may need to adjust the DMA address within the
1014 * buffer by up to 512B. The mempool element size is checked here
1015 * to make sure a maximally sized Ethernet frame can still be wholly
1016 * contained within the buffer after 512B alignment.
1018 * return 1 if the element size is valid, otherwise return 0.
1021 mempool_element_size_valid(struct rte_mempool *mp)
1025 /* elt_size includes mbuf header and headroom */
1026 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1027 RTE_PKTMBUF_HEADROOM;
1029 /* account for up to 512B of alignment */
1030 min_size -= FM10K_RX_BUFF_ALIGN;
1032 /* sanity check for overflow */
1033 if (min_size > mp->elt_size)
1036 if (min_size < ETHER_MAX_VLAN_FRAME_LEN)
1044 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1045 uint16_t nb_desc, unsigned int socket_id,
1046 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1048 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1049 struct fm10k_rx_queue *q;
1050 const struct rte_memzone *mz;
1052 PMD_INIT_FUNC_TRACE();
1054 /* make sure the mempool element size can account for alignment. */
1055 if (!mempool_element_size_valid(mp)) {
1056 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1060 /* make sure a valid number of descriptors have been requested */
1061 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1062 FM10K_MULT_RX_DESC, nb_desc)) {
1063 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1064 "less than or equal to %"PRIu32", "
1065 "greater than or equal to %u, "
1066 "and a multiple of %u",
1067 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1068 FM10K_MULT_RX_DESC);
1073 * if this queue existed already, free the associated memory. The
1074 * queue cannot be reused in case we need to allocate memory on
1075 * different socket than was previously used.
1077 if (dev->data->rx_queues[queue_id] != NULL) {
1078 rx_queue_free(dev->data->rx_queues[queue_id]);
1079 dev->data->rx_queues[queue_id] = NULL;
1082 /* allocate memory for the queue structure */
1083 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1086 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1092 q->nb_desc = nb_desc;
1093 q->port_id = dev->data->port_id;
1094 q->queue_id = queue_id;
1095 q->tail_ptr = (volatile uint32_t *)
1096 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1097 if (handle_rxconf(q, conf))
1100 /* allocate memory for the software ring */
1101 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1102 nb_desc * sizeof(struct rte_mbuf *),
1103 RTE_CACHE_LINE_SIZE, socket_id);
1104 if (q->sw_ring == NULL) {
1105 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1111 * allocate memory for the hardware descriptor ring. A memzone large
1112 * enough to hold the maximum ring size is requested to allow for
1113 * resizing in later calls to the queue setup function.
1115 mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
1116 dev->data->port_id, queue_id, socket_id,
1117 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
1119 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1120 rte_free(q->sw_ring);
1124 q->hw_ring = mz->addr;
1125 q->hw_ring_phys_addr = mz->phys_addr;
1127 dev->data->rx_queues[queue_id] = q;
/* Rx queue release callback: free all queue memory. */
static void
fm10k_rx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	rx_queue_free(queue);
}
1140 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1142 uint16_t tx_free_thresh;
1143 uint16_t tx_rs_thresh;
1145 /* constraint MACROs require that tx_free_thresh is configured
1146 * before tx_rs_thresh */
1147 if (conf->tx_free_thresh == 0)
1148 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1150 tx_free_thresh = conf->tx_free_thresh;
1152 /* make sure the requested threshold satisfies the constraints */
1153 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1154 FM10K_TX_FREE_THRESH_MAX(q),
1155 FM10K_TX_FREE_THRESH_DIV(q),
1157 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1158 "less than or equal to %u, "
1159 "greater than or equal to %u, "
1160 "and a divisor of %u",
1161 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1162 FM10K_TX_FREE_THRESH_MIN(q),
1163 FM10K_TX_FREE_THRESH_DIV(q));
1167 q->free_thresh = tx_free_thresh;
1169 if (conf->tx_rs_thresh == 0)
1170 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1172 tx_rs_thresh = conf->tx_rs_thresh;
1174 q->tx_deferred_start = conf->tx_deferred_start;
1176 /* make sure the requested threshold satisfies the constraints */
1177 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1178 FM10K_TX_RS_THRESH_MAX(q),
1179 FM10K_TX_RS_THRESH_DIV(q),
1181 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1182 "less than or equal to %u, "
1183 "greater than or equal to %u, "
1184 "and a divisor of %u",
1185 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1186 FM10K_TX_RS_THRESH_MIN(q),
1187 FM10K_TX_RS_THRESH_DIV(q));
1191 q->rs_thresh = tx_rs_thresh;
/*
 * Set up a Tx queue for the given port.
 *
 * Validates the requested descriptor count, frees any queue previously
 * configured at this index (so it can be re-created on a different NUMA
 * socket), then allocates the queue structure, software mbuf ring,
 * hardware descriptor ring (memzone) and RS-bit tracker list on
 * @socket_id. On success the queue is stored in dev->data->tx_queues.
 */
1197 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1198 uint16_t nb_desc, unsigned int socket_id,
1199 const struct rte_eth_txconf *conf)
1201 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1202 struct fm10k_tx_queue *q;
1203 const struct rte_memzone *mz;
1205 PMD_INIT_FUNC_TRACE();
1207 /* make sure a valid number of descriptors have been requested */
1208 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1209 FM10K_MULT_TX_DESC, nb_desc)) {
1210 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1211 "less than or equal to %"PRIu32", "
1212 "greater than or equal to %u, "
1213 "and a multiple of %u",
1214 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1215 FM10K_MULT_TX_DESC);
1220 * if this queue existed already, free the associated memory. The
1221 * queue cannot be reused in case we need to allocate memory on
1222 * different socket than was previously used.
1224 if (dev->data->tx_queues[queue_id] != NULL) {
1225 tx_queue_free(dev->data->tx_queues[queue_id]);
1226 dev->data->tx_queues[queue_id] = NULL;
1229 /* allocate memory for the queue structure */
1230 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1233 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
/* initialize queue bookkeeping and map the hardware tail register */
1238 q->nb_desc = nb_desc;
1239 q->port_id = dev->data->port_id;
1240 q->queue_id = queue_id;
1241 q->tail_ptr = (volatile uint32_t *)
1242 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1243 if (handle_txconf(q, conf))
1246 /* allocate memory for the software ring */
1247 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1248 nb_desc * sizeof(struct rte_mbuf *),
1249 RTE_CACHE_LINE_SIZE, socket_id);
1250 if (q->sw_ring == NULL) {
1251 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1257 * allocate memory for the hardware descriptor ring. A memzone large
1258 * enough to hold the maximum ring size is requested to allow for
1259 * resizing in later calls to the queue setup function.
1261 mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1262 dev->data->port_id, queue_id, socket_id,
1263 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
/* on memzone failure, unwind the software ring allocated above */
1265 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1266 rte_free(q->sw_ring);
1270 q->hw_ring = mz->addr;
1271 q->hw_ring_phys_addr = mz->phys_addr;
1274 * allocate memory for the RS bit tracker. Enough slots to hold the
1275 * descriptor index for each RS bit needing to be set are required.
1277 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1278 ((nb_desc + 1) / q->rs_thresh) *
1280 RTE_CACHE_LINE_SIZE, socket_id);
1281 if (q->rs_tracker.list == NULL) {
1282 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1283 rte_free(q->sw_ring);
1288 dev->data->tx_queues[queue_id] = q;
/*
 * eth_dev_ops tx_queue_release callback: free all memory associated
 * with a Tx queue (thin wrapper around tx_queue_free()).
 */
1293 fm10k_tx_queue_release(void *queue)
1295 PMD_INIT_FUNC_TRACE();
1297 tx_queue_free(queue);
/*
 * Update the RSS redirection table (RETA) from @reta_conf.
 *
 * Each FM10K_RETA register packs four 8-bit table entries; entries are
 * written in groups of four, honoring the per-entry bits in
 * reta_conf[].mask. Partially-masked groups are read-modify-written so
 * unselected entries are preserved.
 */
1301 fm10k_reta_update(struct rte_eth_dev *dev,
1302 struct rte_eth_rss_reta_entry64 *reta_conf,
1305 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1306 uint16_t i, j, idx, shift;
1310 PMD_INIT_FUNC_TRACE();
1312 if (reta_size > FM10K_MAX_RSS_INDICES) {
1313 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1314 "(%d) doesn't match the number hardware can supported "
1315 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1320 * Update Redirection Table RETA[n], n=0..31. The redirection table has
1321 * 128-entries in 32 registers
1323 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1324 idx = i / RTE_RETA_GROUP_SIZE;
1325 shift = i % RTE_RETA_GROUP_SIZE;
1326 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1327 BIT_MASK_PER_UINT32);
/* only read the old register value when not all 4 bytes are replaced */
1332 if (mask != BIT_MASK_PER_UINT32)
1333 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1335 for (j = 0; j < CHARS_PER_UINT32; j++) {
1336 if (mask & (0x1 << j)) {
/* clear byte j, then merge in the new 8-bit entry */
1338 reta &= ~(UINT8_MAX << CHAR_BIT * j);
1339 reta |= reta_conf[idx].reta[shift + j] <<
1343 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
/*
 * Read the RSS redirection table (RETA) into @reta_conf.
 *
 * Mirror of fm10k_reta_update(): each FM10K_RETA register holds four
 * 8-bit entries; only entries selected by reta_conf[].mask are copied
 * out. Note the size check here requires reta_size to be at least
 * FM10K_MAX_RSS_INDICES (the full table is always read).
 */
1350 fm10k_reta_query(struct rte_eth_dev *dev,
1351 struct rte_eth_rss_reta_entry64 *reta_conf,
1354 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1355 uint16_t i, j, idx, shift;
1359 PMD_INIT_FUNC_TRACE();
1361 if (reta_size < FM10K_MAX_RSS_INDICES) {
1362 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1363 "(%d) doesn't match the number hardware can supported "
1364 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1369 * Read Redirection Table RETA[n], n=0..31. The redirection table has
1370 * 128-entries in 32 registers
1372 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1373 idx = i / RTE_RETA_GROUP_SIZE;
1374 shift = i % RTE_RETA_GROUP_SIZE;
1375 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1376 BIT_MASK_PER_UINT32);
1380 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1381 for (j = 0; j < CHARS_PER_UINT32; j++) {
1382 if (mask & (0x1 << j))
/* extract byte j of the register as the 8-bit table entry */
1383 reta_conf[idx].reta[shift + j] = ((reta >>
1384 CHAR_BIT * j) & UINT8_MAX);
/*
 * Program the RSS hash key (RSSRK registers) and hash-function selection
 * (MRQC register) from @rss_conf.
 *
 * The key must be at least FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG
 * bytes. DPDK rss_hf flags are translated to FM10K_MRQC_* bits; note
 * IPV6 and IPV6_EX (and the TCP/UDP variants) map to the same hardware
 * bit, so they cannot be enabled independently.
 */
1392 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1393 struct rte_eth_rss_conf *rss_conf)
1395 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1396 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1398 uint64_t hf = rss_conf->rss_hf;
1401 PMD_INIT_FUNC_TRACE();
1403 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1404 FM10K_RSSRK_ENTRIES_PER_REG)
/* translate DPDK hash flags to the hardware MRQC bit mask */
1411 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
1412 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
1413 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
1414 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
1415 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
1416 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
1417 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
1418 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
1419 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
1421 /* If the mapping doesn't fit any supported, return */
/* write the key one 32-bit register at a time, then enable the hash */
1426 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1427 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1429 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
/*
 * Read back the current RSS configuration: the hash key from the RSSRK
 * registers and the enabled hash functions from MRQC, translated back to
 * DPDK ETH_RSS_* flags in rss_conf->rss_hf.
 *
 * Inverse of fm10k_rss_hash_update(); because IPV6 and IPV6_EX share a
 * hardware bit, both flags are reported when that bit is set.
 */
1435 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1436 struct rte_eth_rss_conf *rss_conf)
1438 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1439 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1444 PMD_INIT_FUNC_TRACE();
1446 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1447 FM10K_RSSRK_ENTRIES_PER_REG)
1451 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1452 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1454 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
/* translate hardware MRQC bits back to DPDK hash flags */
1456 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
1457 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
1458 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
1459 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
1460 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
1461 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
1462 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
1463 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
1464 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
1466 rss_conf->rss_hf = hf;
/*
 * Enable PF miscellaneous (non-queue) interrupts.
 *
 * Binds all local non-queue causes (mailbox, PCIe fault, switch up/down,
 * switch event, SRAM, VFLR) to interrupt vector 0 with immediate
 * delivery, unmasks them in EIMR, and clears the ITR(0) mask so vector 0
 * can fire.
 */
1472 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
1474 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1475 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1477 /* Bind all local non-queue interrupt to vector 0 */
1480 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1481 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1482 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1483 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1484 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1485 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1487 /* Enable misc causes */
1488 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
1489 FM10K_EIMR_ENABLE(THI_FAULT) |
1490 FM10K_EIMR_ENABLE(FUM_FAULT) |
1491 FM10K_EIMR_ENABLE(MAILBOX) |
1492 FM10K_EIMR_ENABLE(SWITCHREADY) |
1493 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
1494 FM10K_EIMR_ENABLE(SRAMERROR) |
1495 FM10K_EIMR_ENABLE(VFLR));
/* unmask ITR vector 0 (auto-mask re-arms after each interrupt) */
1498 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1499 FM10K_ITR_MASK_CLEAR);
1500 FM10K_WRITE_FLUSH(hw);
/*
 * Enable VF miscellaneous interrupts. The VF has a single interrupt
 * (vector 0); map it for immediate delivery and clear its ITR mask.
 */
1504 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
1506 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1507 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1509 /* Bind all local non-queue interrupt to vector 0 */
1512 /* Only INT 0 available, other 15 are reserved. */
1513 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
/* unmask VF ITR vector 0 (auto-mask re-arms after each interrupt) */
1516 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1517 FM10K_ITR_MASK_CLEAR);
1518 FM10K_WRITE_FLUSH(hw);
/*
 * Decode and log hardware fault interrupts.
 *
 * For each fault cause flagged in @eicr (PCA, THI, FUM), fetch the fault
 * record via fm10k_get_fault(), translate the numeric fault type into a
 * human-readable name, and log it together with the faulting function
 * (PF or VF number), address and specinfo. Unrecognized types are logged
 * as "Unknown error".
 */
1522 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
1524 struct fm10k_fault fault;
1526 const char *estr = "Unknown error";
1528 /* Process PCA fault */
1529 if (eicr & FM10K_EIMR_PCA_FAULT) {
1530 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
1533 switch (fault.type) {
1535 estr = "PCA_NO_FAULT"; break;
1536 case PCA_UNMAPPED_ADDR:
1537 estr = "PCA_UNMAPPED_ADDR"; break;
1538 case PCA_BAD_QACCESS_PF:
1539 estr = "PCA_BAD_QACCESS_PF"; break;
1540 case PCA_BAD_QACCESS_VF:
1541 estr = "PCA_BAD_QACCESS_VF"; break;
1542 case PCA_MALICIOUS_REQ:
1543 estr = "PCA_MALICIOUS_REQ"; break;
1544 case PCA_POISONED_TLP:
1545 estr = "PCA_POISONED_TLP"; break;
1547 estr = "PCA_TLP_ABORT"; break;
1551 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1552 estr, fault.func ? "VF" : "PF", fault.func,
1553 fault.address, fault.specinfo);
1556 /* Process THI fault */
1557 if (eicr & FM10K_EIMR_THI_FAULT) {
1558 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
1561 switch (fault.type) {
1563 estr = "THI_NO_FAULT"; break;
1564 case THI_MAL_DIS_Q_FAULT:
1565 estr = "THI_MAL_DIS_Q_FAULT"; break;
1569 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1570 estr, fault.func ? "VF" : "PF", fault.func,
1571 fault.address, fault.specinfo);
1574 /* Process FUM fault */
1575 if (eicr & FM10K_EIMR_FUM_FAULT) {
1576 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
1579 switch (fault.type) {
1581 estr = "FUM_NO_FAULT"; break;
1582 case FUM_UNMAPPED_ADDR:
1583 estr = "FUM_UNMAPPED_ADDR"; break;
1584 case FUM_POISONED_TLP:
1585 estr = "FUM_POISONED_TLP"; break;
1586 case FUM_BAD_VF_QACCESS:
1587 estr = "FUM_BAD_VF_QACCESS"; break;
1588 case FUM_ADD_DECODE_ERR:
1589 estr = "FUM_ADD_DECODE_ERR"; break;
1591 estr = "FUM_RO_ERROR"; break;
1592 case FUM_QPRC_CRC_ERROR:
1593 estr = "FUM_QPRC_CRC_ERROR"; break;
1594 case FUM_CSR_TIMEOUT:
1595 estr = "FUM_CSR_TIMEOUT"; break;
1596 case FUM_INVALID_TYPE:
1597 estr = "FUM_INVALID_TYPE"; break;
1598 case FUM_INVALID_LENGTH:
1599 estr = "FUM_INVALID_LENGTH"; break;
1600 case FUM_INVALID_BE:
1601 estr = "FUM_INVALID_BE"; break;
1602 case FUM_INVALID_ALIGN:
1603 estr = "FUM_INVALID_ALIGN"; break;
1607 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1608 estr, fault.func ? "VF" : "PF", fault.func,
1609 fault.address, fault.specinfo);
/* error path: reached when a fm10k_get_fault() call above failed */
1616 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
 * PF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * Dispatches on the EICR cause bits: hardware faults, switch up/down
 * transitions, mailbox messages (processed under the mailbox lock) and
 * SRAM errors. Handled causes are cleared in EICR and the interrupt is
 * re-enabled on both the device (ITR) and host (eal intr) sides.
 *
 * Pointer to interrupt handle.
 * The address of parameter (struct rte_eth_dev *) registered before.
1632 fm10k_dev_interrupt_handler_pf(
1633 __rte_unused struct rte_intr_handle *handle,
1636 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1637 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1638 uint32_t cause, status;
1640 if (hw->mac.type != fm10k_mac_pf)
1643 cause = FM10K_READ_REG(hw, FM10K_EICR);
1645 /* Handle PCI fault cases */
1646 if (cause & FM10K_EICR_FAULT_MASK) {
1647 PMD_INIT_LOG(ERR, "INT: find fault!");
1648 fm10k_dev_handle_fault(hw, cause);
1651 /* Handle switch up/down */
1652 if (cause & FM10K_EICR_SWITCHNOTREADY)
1653 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
1655 if (cause & FM10K_EICR_SWITCHREADY)
1656 PMD_INIT_LOG(INFO, "INT: Switch is ready");
1658 /* Handle mailbox message under the mailbox lock */
1660 hw->mbx.ops.process(hw, &hw->mbx);
1661 fm10k_mbx_unlock(hw);
1663 /* Handle SRAM error */
1664 if (cause & FM10K_EICR_SRAMERROR) {
1665 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
1667 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
1668 /* Write to clear pending bits */
1669 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
1671 /* Todo: print out error message after shared code updates */
1674 /* Clear these 3 events if having any */
1675 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
1676 FM10K_EICR_SWITCHREADY;
1678 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
1680 /* Re-enable interrupt from device side */
1681 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1682 FM10K_ITR_MASK_CLEAR);
1683 /* Re-enable interrupt from host side */
1684 rte_intr_enable(&(dev->pci_dev->intr_handle));
 * VF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * The VF's only interrupt source is the mailbox: process pending
 * messages under the mailbox lock, then re-enable the interrupt on both
 * the device (VFITR) and host (eal intr) sides.
 *
 * Pointer to interrupt handle.
 * The address of parameter (struct rte_eth_dev *) registered before.
1699 fm10k_dev_interrupt_handler_vf(
1700 __rte_unused struct rte_intr_handle *handle,
1703 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1704 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1706 if (hw->mac.type != fm10k_mac_vf)
1709 /* Handle mailbox message if lock is acquired */
1711 hw->mbx.ops.process(hw, &hw->mbx);
1712 fm10k_mbx_unlock(hw);
1714 /* Re-enable interrupt from device side */
1715 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1716 FM10K_ITR_MASK_CLEAR);
1717 /* Re-enable interrupt from host side */
1718 rte_intr_enable(&(dev->pci_dev->intr_handle));
1721 /* Mailbox message handler in VF */
/*
 * Dispatch table passed to mbx.ops.register_handlers(): maps incoming
 * mailbox message IDs (test, MAC/VLAN, lport state, error) to their VF
 * handler functions from the shared base code.
 */
1722 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
1723 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1724 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
1725 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
1726 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1729 /* Mailbox message handler in PF */
/*
 * PF dispatch table for mailbox messages from the switch manager:
 * lport map and PVID updates get dedicated handlers; the remaining
 * message IDs are routed to the generic PF error handler.
 */
1730 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
1731 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1732 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1733 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1734 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1735 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1736 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1737 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
/*
 * Initialize the mailbox: create the lock, register the PF or VF message
 * dispatch table, and connect (PF connects to the switch manager, VF to
 * its PF). Returns the result of the connect operation, or logs and
 * bails if handler registration fails.
 */
1741 fm10k_setup_mbx_service(struct fm10k_hw *hw)
1745 /* Initialize mailbox lock */
1746 fm10k_mbx_initlock(hw);
1748 /* Replace default message handler with new ones */
1749 if (hw->mac.type == fm10k_mac_pf)
1750 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
1752 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
1755 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
1759 /* Connect to SM for PF device or PF for VF device */
1760 return hw->mbx.ops.connect(hw, &hw->mbx);
/* Tear down the mailbox connection established by fm10k_setup_mbx_service(). */
1764 fm10k_close_mbx_service(struct fm10k_hw *hw)
1766 /* Disconnect from SM for PF device or PF for VF device */
1767 hw->mbx.ops.disconnect(hw, &hw->mbx);
/*
 * ethdev callback table: wires the generic rte_ethdev API to the fm10k
 * implementations defined in this file (lifecycle, stats, VLAN, per-queue
 * setup/start/stop, and RSS RETA/hash configuration).
 */
1770 static const struct eth_dev_ops fm10k_eth_dev_ops = {
1771 .dev_configure = fm10k_dev_configure,
1772 .dev_start = fm10k_dev_start,
1773 .dev_stop = fm10k_dev_stop,
1774 .dev_close = fm10k_dev_close,
1775 .promiscuous_enable = fm10k_dev_promiscuous_enable,
1776 .promiscuous_disable = fm10k_dev_promiscuous_disable,
1777 .allmulticast_enable = fm10k_dev_allmulticast_enable,
1778 .allmulticast_disable = fm10k_dev_allmulticast_disable,
1779 .stats_get = fm10k_stats_get,
1780 .stats_reset = fm10k_stats_reset,
1781 .link_update = fm10k_link_update,
1782 .dev_infos_get = fm10k_dev_infos_get,
1783 .vlan_filter_set = fm10k_vlan_filter_set,
1784 .rx_queue_start = fm10k_dev_rx_queue_start,
1785 .rx_queue_stop = fm10k_dev_rx_queue_stop,
1786 .tx_queue_start = fm10k_dev_tx_queue_start,
1787 .tx_queue_stop = fm10k_dev_tx_queue_stop,
1788 .rx_queue_setup = fm10k_rx_queue_setup,
1789 .rx_queue_release = fm10k_rx_queue_release,
1790 .tx_queue_setup = fm10k_tx_queue_setup,
1791 .tx_queue_release = fm10k_tx_queue_release,
1792 .reta_update = fm10k_reta_update,
1793 .reta_query = fm10k_reta_query,
1794 .rss_hash_update = fm10k_rss_hash_update,
1795 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
/*
 * Per-device initialization callback invoked by the eth driver framework.
 *
 * Installs the ops table and burst functions, then (primary process only)
 * initializes the shared code and hardware, reads or generates the MAC
 * address, resets stats/hw, sets up the mailbox, registers the PF or VF
 * interrupt handler, and — under the mailbox lock — enables the logical
 * port, programs the default VLAN and MAC filter, and sets the xcast
 * mode before enabling host-side interrupts.
 */
1799 eth_fm10k_dev_init(struct rte_eth_dev *dev)
1801 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1804 PMD_INIT_FUNC_TRACE();
1806 dev->dev_ops = &fm10k_eth_dev_ops;
1807 dev->rx_pkt_burst = &fm10k_recv_pkts;
1808 dev->tx_pkt_burst = &fm10k_xmit_pkts;
/* scattered-Rx configuration needs the multi-segment receive path */
1810 if (dev->data->scattered_rx)
1811 dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
1813 /* only initialize in the primary process */
1814 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1817 /* Vendor and Device ID need to be set before init of shared code */
1818 memset(hw, 0, sizeof(*hw));
1819 hw->device_id = dev->pci_dev->id.device_id;
1820 hw->vendor_id = dev->pci_dev->id.vendor_id;
1821 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
1822 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
1823 hw->revision_id = 0;
1824 hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
1825 if (hw->hw_addr == NULL) {
1826 PMD_INIT_LOG(ERR, "Bad mem resource."
1827 " Try to blacklist unused devices.");
1831 /* Store fm10k_adapter pointer */
1832 hw->back = dev->data->dev_private;
1834 /* Initialize the shared code */
1835 diag = fm10k_init_shared_code(hw);
1836 if (diag != FM10K_SUCCESS) {
1837 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1842 * Initialize bus info. Normally we would call fm10k_get_bus_info(), but
1843 * there is no way to get link status without reading BAR4. Until this
1844 * works, assume we have maximum bandwidth.
1845 * @todo - fix bus info
1847 hw->bus_caps.speed = fm10k_bus_speed_8000;
1848 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
1849 hw->bus_caps.payload = fm10k_bus_payload_512;
1850 hw->bus.speed = fm10k_bus_speed_8000;
1851 hw->bus.width = fm10k_bus_width_pcie_x8;
1852 hw->bus.payload = fm10k_bus_payload_256;
1854 /* Initialize the hw */
1855 diag = fm10k_init_hw(hw);
1856 if (diag != FM10K_SUCCESS) {
1857 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1861 /* Initialize MAC address(es) */
1862 dev->data->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
1863 if (dev->data->mac_addrs == NULL) {
1864 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
1868 diag = fm10k_read_mac_addr(hw);
1869 if (diag != FM10K_SUCCESS) {
1871 * TODO: remove special handling on VF. Need shared code to
/* VFs may not have a MAC assigned yet; fall back to a random one */
1874 if (hw->mac.type == fm10k_mac_pf) {
1875 PMD_INIT_LOG(ERR, "Read MAC addr failed: %d", diag);
1878 /* Generate a random addr */
1879 eth_random_addr(hw->mac.addr);
1880 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
1884 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
1885 &dev->data->mac_addrs[0]);
1887 /* Reset the hw statistics */
1888 fm10k_stats_reset(dev);
1891 diag = fm10k_reset_hw(hw);
1892 if (diag != FM10K_SUCCESS) {
1893 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
1897 /* Setup mailbox service */
1898 diag = fm10k_setup_mbx_service(hw);
1899 if (diag != FM10K_SUCCESS) {
1900 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
1904 /*PF/VF has different interrupt handling mechanism */
1905 if (hw->mac.type == fm10k_mac_pf) {
1906 /* register callback func to eal lib */
1907 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
1908 fm10k_dev_interrupt_handler_pf, (void *)dev);
1910 /* enable MISC interrupt */
1911 fm10k_dev_enable_intr_pf(dev);
1913 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
1914 fm10k_dev_interrupt_handler_vf, (void *)dev);
1916 fm10k_dev_enable_intr_vf(dev);
1920 * Below function will trigger operations on mailbox, acquire lock to
1921 * avoid race condition from interrupt handler. Operations on mailbox
1922 * FIFO will trigger interrupt to PF/SM, in which interrupt handler
1923 * will handle and generate an interrupt to our side. Then, FIFO in
1924 * mailbox will be touched.
1927 /* Enable port first */
1928 hw->mac.ops.update_lport_state(hw, 0, 0, 1);
1930 /* Update default vlan */
1931 hw->mac.ops.update_vlan(hw, hw->mac.default_vid, 0, true);
1934 * Add default mac/vlan filter. glort is assigned by SM for PF, while is
1935 * unused for VF. PF will assign correct glort for VF.
1937 hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
1938 hw->mac.default_vid, 1, 0);
1940 /* Set unicast mode by default. App can change to other mode in other
1943 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1944 FM10K_XCAST_MODE_NONE);
1946 fm10k_mbx_unlock(hw);
1948 /* enable uio intr after callback registered */
1949 rte_intr_enable(&(dev->pci_dev->intr_handle));
 * The set of PCI devices this driver supports. This driver will enable both PF
 * and SRIOV-VF devices.
/*
 * The ID table is populated by redefining the FM10K declaration macros
 * and re-including rte_pci_dev_ids.h; a zero vendor_id terminates it.
 */
1958 static const struct rte_pci_id pci_id_fm10k_map[] = {
1959 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
1960 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
1961 #include "rte_pci_dev_ids.h"
1962 { .vendor_id = 0, /* sentinel */ },
/*
 * Ethernet driver descriptor: ties the PCI ID table to the per-device
 * init routine and declares the size of the driver-private data area.
 */
1965 static struct eth_driver rte_pmd_fm10k = {
1967 .name = "rte_pmd_fm10k",
1968 .id_table = pci_id_fm10k_map,
1969 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1971 .eth_dev_init = eth_fm10k_dev_init,
1972 .dev_private_size = sizeof(struct fm10k_adapter),
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
1981 rte_pmd_fm10k_init(__rte_unused const char *name,
1982 __rte_unused const char *params)
1984 PMD_INIT_FUNC_TRACE();
1985 rte_eth_driver_register(&rte_pmd_fm10k);
/* EAL driver hook: runs rte_pmd_fm10k_init() at startup via the
 * PMD_REGISTER_DRIVER constructor mechanism. */
1989 static struct rte_driver rte_fm10k_driver = {
1991 .init = rte_pmd_fm10k_init,
1994 PMD_REGISTER_DRIVER(rte_fm10k_driver);