/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
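
/*
 * Worked example: sizeof(uint32_t) is 4, so CHARS_PER_UINT32 is 4 and
 * BIT_MASK_PER_UINT32 is (1 << 4) - 1 = 0xF. Each 32-bit RETA register
 * holds four one-byte queue indices, so this 4-bit mask selects which of
 * the four entries in a register are touched during an update.
 */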

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
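
/*
 * The mailbox lock is shared with the interrupt handler (see the note in
 * eth_fm10k_dev_init() below), so it is acquired by polling
 * rte_spinlock_trylock() with a short delay between attempts rather than
 * by busy-spinning at full rate.
 */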
static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
	uint64_t dma_addr;
	int i, diag;

	PMD_INIT_FUNC_TRACE();

	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
	if (diag != 0)
		return -ENOMEM;

	for (i = 0; i < q->nb_desc; ++i) {
		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
						q->nb_desc);
			return -EINVAL;
		}
		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
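		/* header split is not used: point both the packet and the
		 * header address at the start of the same data buffer
		 */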
		q->hw_ring[i].q.pkt_addr = dma_addr;
		q->hw_ring[i].q.hdr_addr = dma_addr;
	}

	q->next_dd = 0;
	q->next_alloc = 0;
	q->next_trigger = q->alloc_thresh - 1;
	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
	return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
		rx_queue_clean(q);
		if (q->sw_ring) {
			rte_free(q->sw_ring);
			q->sw_ring = NULL;
		}
		rte_free(q);
	}
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
			reg & ~FM10K_RXQCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
		if (!(reg & FM10K_RXQCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	q->last_free = 0;
	q->next_free = 0;
	q->nb_used = 0;
	q->nb_free = q->nb_desc - 1;
	q->free_trigger = q->nb_free - q->free_thresh;
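	/* The RS bit is requested at most once every rs_thresh descriptors,
	 * so (nb_desc + 1) / rs_thresh slots are enough to track every
	 * outstanding RS position; the list itself is allocated with the
	 * same formula in fm10k_tx_queue_setup().
	 */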
	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
		tx_queue_clean(q);
		if (q->rs_tracker.list) {
			rte_free(q->rs_tracker.list);
			q->rs_tracker.list = NULL;
		}
		if (q->sw_ring) {
			rte_free(q->sw_ring);
			q->sw_ring = NULL;
		}
		rte_free(q);
	}
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
			reg & ~FM10K_TXDCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
		if (!(reg & FM10K_TXDCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
		PMD_INIT_LOG(WARNING, "fm10k always strips CRC");

	return 0;
}

static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint32_t mrqc, *key, i, reta, j;
	uint64_t hf;

#define RSS_KEY_SIZE 40
	static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
		0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
		0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
	};

	if (dev->data->nb_rx_queues == 1 ||
	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
	    dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
		return;

	/* random key is rss_intel_key (default) or user-provided (rss_key) */
	if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
		key = (uint32_t *)rss_intel_key;
	else
		key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

	/* Now fill our hash function seeds, 4 bytes at a time */
	for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
		FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

	/*
	 * Fill in redirection table
	 * The byte-swap is needed because NIC registers are in
	 * little-endian order.
	 */
	reta = 0;
	for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
		if (j == dev->data->nb_rx_queues)
			j = 0;
		reta = (reta << CHAR_BIT) | j;
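		/*
		 * Example: with 4 RX queues the indices cycle 0, 1, 2, 3 and
		 * every four iterations one packed 32-bit value is flushed
		 * to FM10K_RETA(0, i >> 2) below.
		 */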
		if ((i & 3) == 3)
			FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
					rte_bswap32(reta));
	}

	/*
	 * Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
	mrqc = 0;
	mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;

	if (mrqc == 0) {
		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
				"supported", hf);
		return;
	}

	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, ret;
	struct fm10k_tx_queue *txq;
	uint64_t base_addr;
	uint32_t size;

	/* Disable TXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_TXINT(i),
				3 << FM10K_TXINT_TIMER_SHIFT);

	/* Setup TX queues */
	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
		txq = dev->data->tx_queues[i];
		base_addr = txq->hw_ring_phys_addr;
		size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

		/* disable queue to avoid issues while updating state */
		ret = tx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* set location and size for descriptor ring */
		FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
	}
	return 0;
}

static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, ret;
	struct fm10k_rx_queue *rxq;
	uint64_t base_addr;
	uint32_t size;
	uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	uint16_t buf_size;

	/* Disable RXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_RXINT(i),
				3 << FM10K_RXINT_TIMER_SHIFT);

	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		rxq = dev->data->rx_queues[i];
		base_addr = rxq->hw_ring_phys_addr;
		size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

		/* disable queue to avoid issues while updating state */
		ret = rx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* Setup the Base and Length of the Rx Descriptor Ring */
		FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

		/* Configure the Rx buffer size for one buff without split */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
			RTE_PKTMBUF_HEADROOM);
		/* As RX buffer is aligned to 512B within mbuf, some bytes are
		 * reserved for this purpose, and the worst case could be 511B.
		 * But SRR reg assumes all buffers have the same size. In order
		 * to fill the gap, we'll have to consider the worst case and
		 * assume 512B is reserved. If we don't do so, it's possible
		 * for HW to overwrite data to next mbuf.
		 */
		buf_size -= FM10K_RX_DATABUF_ALIGN;

		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
				buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
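
		/*
		 * Worked example, assuming the common 2KB mbuf data room and
		 * the default 128B RTE_PKTMBUF_HEADROOM: 2048 - 128 = 1920B,
		 * and subtracting the 512B alignment reserve leaves 1408B,
		 * which is programmed into SRRCTL in units of
		 * 1 << FM10K_SRRCTL_BSIZEPKT_SHIFT.
		 */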

		/* Add dual VLAN length to support dual VLAN */
		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
				2 * FM10K_VLAN_TAG_SIZE) > buf_size) {
			dev->data->scattered_rx = 1;
			dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
		}

		/* Enable drop on empty, it's RO for VF */
		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
		FM10K_WRITE_FLUSH(hw);
	}

	if (dev->data->dev_conf.rxmode.enable_scatter) {
		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/* Configure RSS if applicable */
	fm10k_dev_mq_rx_configure(dev);
	return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = -1;
	uint32_t reg;
	struct fm10k_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];
		err = rx_queue_reset(rxq);
		if (err == -ENOMEM) {
			PMD_INIT_LOG(ERR, "Failed to alloc memory: %d", err);
			return err;
		} else if (err == -EINVAL) {
			PMD_INIT_LOG(ERR, "Invalid buffer address alignment: "
				"%d", err);
			return err;
		}

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled on real
		 * hardware, but BEFORE the queue is enabled when using the
		 * emulation platform. Do it in both places for now and remove
		 * this comment and the following two register writes when the
		 * emulation platform is no longer being used.
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

		/* Set PF ownership flag for PF devices */
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
		if (hw->mac.type == fm10k_mac_pf)
			reg |= FM10K_RXQCTL_PF;
		reg |= FM10K_RXQCTL_ENABLE;
		/* enable RX queue */
		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
		FM10K_WRITE_FLUSH(hw);

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
	}

	return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		/* Disable RX queue */
		rx_queue_disable(hw, rx_queue_id);

		/* Free mbuf and clean HW ring */
		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
	}

	return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_reset(dev->data->tx_queues[tx_queue_id]);

		/* reset head and tail pointers */
		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

		/* enable TX queue */
		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
					FM10K_TXDCTL_ENABLE | txdctl);
		FM10K_WRITE_FLUSH(hw);
	} else
		err = -1;

	return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_disable(hw, tx_queue_id);
		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
	}

	return 0;
}

static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
	return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
		!= FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range hasn't been acquired */
	if (!fm10k_glort_valid(hw))
		return;

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_PROMISC);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t mode;
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range hasn't been acquired */
	if (!fm10k_glort_valid(hw))
		return;

	if (dev->data->all_multicast == 1)
		mode = FM10K_XCAST_MODE_ALLMULTI;
	else
		mode = FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				mode);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range hasn't been acquired */
	if (!fm10k_glort_valid(hw))
		return;

	/* If promiscuous mode is enabled, it doesn't make sense to enable
	 * allmulticast and disable promiscuous since fm10k can only select
	 * one of the modes at a time.
	 */
	if (dev->data->promiscuous) {
		PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
			"no need to enable allmulticast");
		return;
	}

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_ALLMULTI);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range hasn't been acquired */
	if (!fm10k_glort_valid(hw))
		return;

	if (dev->data->promiscuous) {
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
			"since promisc mode is enabled");
		return;
	}

	fm10k_mbx_lock(hw);
	/* Change mode to unicast mode */
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_NONE);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
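
/*
 * Example: fls(0) = 0, fls(1) = 1, fls(15) = 4, fls(16) = 5. It is used
 * below as fls(nb_rx_queues - 1) to compute the number of RETA bits needed
 * to address every configured RX queue (i.e. log2, rounded up).
 */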

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)

static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, diag;

	PMD_INIT_FUNC_TRACE();

	/* stop, init, then start the hw */
	diag = fm10k_stop_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_start_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_dev_tx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
		return diag;
	}

	diag = fm10k_dev_rx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
		return diag;
	}

	if (hw->mac.type == fm10k_mac_pf) {
		/* Establish only VSI 0 as valid */
		FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);

		/* Configure RSS bits used in RETA table */
		FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
				fls(dev->data->nb_rx_queues - 1) <<
				FM10K_DGLORTDEC_RSSLENGTH_SHIFT);

		/* Invalidate all other GLORT entries */
		for (i = 1; i < FM10K_DGLORT_COUNT; i++)
			FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
					FM10K_DGLORTMAP_NONE);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq;
		rxq = dev->data->rx_queues[i];

		if (rxq->rx_deferred_start)
			continue;
		diag = fm10k_dev_rx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < i; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct fm10k_tx_queue *txq;
		txq = dev->data->tx_queues[i];

		if (txq->tx_deferred_start)
			continue;
		diag = fm10k_dev_tx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < dev->data->nb_rx_queues; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

	return 0;
}

static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		fm10k_dev_tx_queue_stop(dev, i);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		fm10k_dev_rx_queue_stop(dev, i);
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Stop mailbox service first */
	fm10k_close_mbx_service(hw);
	fm10k_dev_stop(dev);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();

	/* The host-interface link is always up. The speed is ~50Gbps per Gen3
	 * x8 PCIe interface. For now, we leave the speed undefined since there
	 * is no 50Gbps Ethernet.
	 */
	dev->data->dev_link.link_speed = 0;
	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	dev->data->dev_link.link_status = 1;

	return 0;
}

static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint64_t ipackets, opackets, ibytes, obytes;
	struct fm10k_hw *hw =
		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	fm10k_update_hw_stats(hw, hw_stats);

	ipackets = opackets = ibytes = obytes = 0;
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
			(i < hw->mac.max_queues); ++i) {
		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
		stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
		stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
		ipackets += stats->q_ipackets[i];
		opackets += stats->q_opackets[i];
		ibytes += stats->q_ibytes[i];
		obytes += stats->q_obytes[i];
	}
	stats->ipackets = ipackets;
	stats->opackets = opackets;
	stats->ibytes = ibytes;
	stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	memset(hw_stats, 0, sizeof(*hw_stats));
	fm10k_rebind_hw_stats(hw, hw_stats);
}

static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
	dev_info->max_rx_queues = hw->mac.max_queues;
	dev_info->max_tx_queues = hw->mac.max_queues;
	dev_info->max_mac_addrs = 1;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = dev->pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa = 0;
	dev_info->reta_size = FM10K_MAX_RSS_INDICES;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = FM10K_DEFAULT_RX_PTHRESH,
			.hthresh = FM10K_DEFAULT_RX_HTHRESH,
			.wthresh = FM10K_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = FM10K_DEFAULT_TX_PTHRESH,
			.hthresh = FM10K_DEFAULT_TX_HTHRESH,
			.wthresh = FM10K_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}

static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* @todo - add support for the VF */
	if (hw->mac.type != fm10k_mac_pf)
		return -ENOTSUP;

	return fm10k_update_vlan(hw, vlan_id, 0, on);
}

static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
	if ((request < min) || (request > max) || ((request % mult) != 0))
		return -1;
	else
		return 0;
}

/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
	uint8_t port_id, uint16_t queue_id, int socket_id,
	uint32_t size, uint32_t align)
{
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
		 driver_name, ring_name, port_id, queue_id, socket_id);

	/* return the memzone if it already exists */
	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
					   RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}

static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
	if ((request < min) || (request > max) || ((div % request) != 0))
		return -1;
	else
		return 0;
}

static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
	uint16_t rx_free_thresh;

	if (conf->rx_free_thresh == 0)
		rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
	else
		rx_free_thresh = conf->rx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_DIV(q),
			rx_free_thresh)) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_DIV(q));
		return -EINVAL;
	}

	q->alloc_thresh = rx_free_thresh;
	q->drop_en = conf->rx_drop_en;
	q->rx_deferred_start = conf->rx_deferred_start;

	return 0;
}

/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
	uint32_t min_size;

	/* elt_size includes mbuf header and headroom */
	min_size = mp->elt_size - sizeof(struct rte_mbuf) -
			RTE_PKTMBUF_HEADROOM;

	/* account for up to 512B of alignment */
	min_size -= FM10K_RX_DATABUF_ALIGN;
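
	/* min_size is unsigned, so an element that is too small makes the
	 * subtractions above wrap around; the comparison below catches that
	 * wrap as well as genuinely undersized elements.
	 */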
	/* sanity check for overflow */
	if (min_size > mp->elt_size)
		return 0;

	return 1;
}

static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_rx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure the mempool element size can account for alignment. */
	if (!mempool_element_size_valid(mp)) {
		PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
		return -EINVAL;
	}

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
				FM10K_MULT_RX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
			FM10K_MULT_RX_DESC);
		return -EINVAL;
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * different socket than was previously used.
	 */
	if (dev->data->rx_queues[queue_id] != NULL) {
		rx_queue_free(dev->data->rx_queues[queue_id]);
		dev->data->rx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	/* setup queue */
	q->mp = mp;
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
	if (handle_rxconf(q, conf))
		return -EINVAL;

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return -ENOMEM;
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return -ENOMEM;
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	dev->data->rx_queues[queue_id] = q;
	return 0;
}

static void
fm10k_rx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	rx_queue_free(queue);
}

static inline int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
	uint16_t tx_free_thresh;
	uint16_t tx_rs_thresh;

	/* constraint macros require that tx_free_thresh is configured
	 * before tx_rs_thresh */
	if (conf->tx_free_thresh == 0)
		tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
	else
		tx_free_thresh = conf->tx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_DIV(q),
			tx_free_thresh)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_DIV(q));
		return -EINVAL;
	}

	q->free_thresh = tx_free_thresh;

	if (conf->tx_rs_thresh == 0)
		tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
	else
		tx_rs_thresh = conf->tx_rs_thresh;

	q->tx_deferred_start = conf->tx_deferred_start;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_DIV(q),
			tx_rs_thresh)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_DIV(q));
		return -EINVAL;
	}

	q->rs_thresh = tx_rs_thresh;

	return 0;
}

static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_txconf *conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_tx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
				FM10K_MULT_TX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
			FM10K_MULT_TX_DESC);
		return -EINVAL;
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * different socket than was previously used.
	 */
	if (dev->data->tx_queues[queue_id] != NULL) {
		tx_queue_free(dev->data->tx_queues[queue_id]);
		dev->data->tx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	/* setup queue */
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
	if (handle_txconf(q, conf))
		return -EINVAL;

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return -ENOMEM;
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return -ENOMEM;
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	/*
	 * allocate memory for the RS bit tracker. Enough slots to hold the
	 * descriptor index for each RS bit needing to be set are required.
	 */
	q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
				((nb_desc + 1) / q->rs_thresh) *
				sizeof(uint16_t),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (q->rs_tracker.list == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
		rte_free(q->sw_ring);
		rte_free(q);
		return -ENOMEM;
	}

	dev->data->tx_queues[queue_id] = q;
	return 0;
}

static void
fm10k_tx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	tx_queue_free(queue);
}

static int
fm10k_reta_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
	uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size > FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match what the hardware can support "
			"(%d)", reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Update Redirection Table RETA[n], n=0..31. The redirection table has
	 * 128 entries in 32 registers.
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		reta = 0;
		if (mask != BIT_MASK_PER_UINT32)
			reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
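		/*
		 * Example: a mask of 0x3 updates only the two low-order
		 * entries of this register; the read above preserves the
		 * other two entries across the rewrite below.
		 */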

		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j)) {
				if (mask != BIT_MASK_PER_UINT32)
					reta &= ~(UINT8_MAX << CHAR_BIT * j);
				reta |= reta_conf[idx].reta[shift + j] <<
						(CHAR_BIT * j);
			}
		}
		FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
	}

	return 0;
}

static int
fm10k_reta_query(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
	uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size < FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match what the hardware can support "
			"(%d)", reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Read Redirection Table RETA[n], n=0..31. The redirection table has
	 * 128 entries in 32 registers.
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] = ((reta >>
					CHAR_BIT * j) & UINT8_MAX);
		}
	}

	return 0;
}

static int
fm10k_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t hf = rss_conf->rss_hf;
	int i;

	PMD_INIT_FUNC_TRACE();
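
	/* The key must fill the whole RSSRK bank: FM10K_RSSRK_SIZE registers
	 * of FM10K_RSSRK_ENTRIES_PER_REG bytes each, which is assumed to
	 * match the 40-byte RSS_KEY_SIZE used in fm10k_dev_mq_rx_configure()
	 * above.
	 */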
	if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
			FM10K_RSSRK_ENTRIES_PER_REG)
		return -EINVAL;

	if (hf == 0)
		return -EINVAL;

	mrqc = 0;
	mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;

	/* If the flags don't map onto any supported hash function, return */
	if (mrqc == 0)
		return -EINVAL;

	if (key != NULL)
		for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
			FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);

	return 0;
}

static int
fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t hf;
	int i;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
			FM10K_RSSRK_ENTRIES_PER_REG)
		return -EINVAL;

	if (key != NULL)
		for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
			key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));

	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
	hf = 0;
	hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;

	rss_conf->rss_hf = hf;

	return 0;
}

static void
fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

	/* Bind all local non-queue interrupts to vector 0 */
	int_map |= 0;

	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);

	/* Enable misc causes */
	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
				FM10K_EIMR_ENABLE(THI_FAULT) |
				FM10K_EIMR_ENABLE(FUM_FAULT) |
				FM10K_EIMR_ENABLE(MAILBOX) |
				FM10K_EIMR_ENABLE(SWITCHREADY) |
				FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
				FM10K_EIMR_ENABLE(SRAMERROR) |
				FM10K_EIMR_ENABLE(VFLR));

	/* Enable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	FM10K_WRITE_FLUSH(hw);
}

static void
fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

	/* Bind all local non-queue interrupts to vector 0 */
	int_map |= 0;

	/* Only INT 0 is available, the other 15 are reserved. */
	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);

	/* Enable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	FM10K_WRITE_FLUSH(hw);
}

static inline int
fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
{
	struct fm10k_fault fault;
	int err;
	const char *estr = "Unknown error";

	/* Process PCA fault */
	if (eicr & FM10K_EIMR_PCA_FAULT) {
		err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case PCA_NO_FAULT:
			estr = "PCA_NO_FAULT"; break;
		case PCA_UNMAPPED_ADDR:
			estr = "PCA_UNMAPPED_ADDR"; break;
		case PCA_BAD_QACCESS_PF:
			estr = "PCA_BAD_QACCESS_PF"; break;
		case PCA_BAD_QACCESS_VF:
			estr = "PCA_BAD_QACCESS_VF"; break;
		case PCA_MALICIOUS_REQ:
			estr = "PCA_MALICIOUS_REQ"; break;
		case PCA_POISONED_TLP:
			estr = "PCA_POISONED_TLP"; break;
		case PCA_TLP_ABORT:
			estr = "PCA_TLP_ABORT"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	/* Process THI fault */
	if (eicr & FM10K_EIMR_THI_FAULT) {
		err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case THI_NO_FAULT:
			estr = "THI_NO_FAULT"; break;
		case THI_MAL_DIS_Q_FAULT:
			estr = "THI_MAL_DIS_Q_FAULT"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	/* Process FUM fault */
	if (eicr & FM10K_EIMR_FUM_FAULT) {
		err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case FUM_NO_FAULT:
			estr = "FUM_NO_FAULT"; break;
		case FUM_UNMAPPED_ADDR:
			estr = "FUM_UNMAPPED_ADDR"; break;
		case FUM_POISONED_TLP:
			estr = "FUM_POISONED_TLP"; break;
		case FUM_BAD_VF_QACCESS:
			estr = "FUM_BAD_VF_QACCESS"; break;
		case FUM_ADD_DECODE_ERR:
			estr = "FUM_ADD_DECODE_ERR"; break;
		case FUM_RO_ERROR:
			estr = "FUM_RO_ERROR"; break;
		case FUM_QPRC_CRC_ERROR:
			estr = "FUM_QPRC_CRC_ERROR"; break;
		case FUM_CSR_TIMEOUT:
			estr = "FUM_CSR_TIMEOUT"; break;
		case FUM_INVALID_TYPE:
			estr = "FUM_INVALID_TYPE"; break;
		case FUM_INVALID_LENGTH:
			estr = "FUM_INVALID_LENGTH"; break;
		case FUM_INVALID_BE:
			estr = "FUM_INVALID_BE"; break;
		case FUM_INVALID_ALIGN:
			estr = "FUM_INVALID_ALIGN"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	return 0;
error:
	PMD_INIT_LOG(ERR, "Failed to handle fault event.");
	return err;
}

/**
 * PF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_pf(
			__rte_unused struct rte_intr_handle *handle,
			void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t cause, status;

	if (hw->mac.type != fm10k_mac_pf)
		return;

	cause = FM10K_READ_REG(hw, FM10K_EICR);

	/* Handle PCI fault cases */
	if (cause & FM10K_EICR_FAULT_MASK) {
		PMD_INIT_LOG(ERR, "INT: fault found!");
		fm10k_dev_handle_fault(hw, cause);
	}

	/* Handle switch up/down */
	if (cause & FM10K_EICR_SWITCHNOTREADY)
		PMD_INIT_LOG(ERR, "INT: Switch is not ready");

	if (cause & FM10K_EICR_SWITCHREADY)
		PMD_INIT_LOG(INFO, "INT: Switch is ready");

	/* Handle mailbox message */
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	/* Handle SRAM error */
	if (cause & FM10K_EICR_SRAMERROR) {
		PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");

		status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
		/* Write to clear pending bits */
		FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);

		/* Todo: print out error message after shared code updates */
	}

	/* Clear these 3 events, if any are pending */
	cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
		FM10K_EICR_SWITCHREADY;
	if (cause)
		FM10K_WRITE_REG(hw, FM10K_EICR, cause);

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}

/**
 * VF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_vf(
			__rte_unused struct rte_intr_handle *handle,
			void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != fm10k_mac_vf)
		return;

	/* Handle mailbox message if lock is acquired */
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}

/* Mailbox message handler in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

/* Mailbox message handler in PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace default message handler with new ones */
	if (hw->mac.type == fm10k_mac_pf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
	else
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
				err);
		return err;
	}

	/* Connect to SM for PF device or PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}

static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
	/* Disconnect from SM for PF device or PF for VF device */
	hw->mbx.ops.disconnect(hw, &hw->mbx);
}

static const struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure = fm10k_dev_configure,
	.dev_start = fm10k_dev_start,
	.dev_stop = fm10k_dev_stop,
	.dev_close = fm10k_dev_close,
	.promiscuous_enable = fm10k_dev_promiscuous_enable,
	.promiscuous_disable = fm10k_dev_promiscuous_disable,
	.allmulticast_enable = fm10k_dev_allmulticast_enable,
	.allmulticast_disable = fm10k_dev_allmulticast_disable,
	.stats_get = fm10k_stats_get,
	.stats_reset = fm10k_stats_reset,
	.link_update = fm10k_link_update,
	.dev_infos_get = fm10k_dev_infos_get,
	.vlan_filter_set = fm10k_vlan_filter_set,
	.rx_queue_start = fm10k_dev_rx_queue_start,
	.rx_queue_stop = fm10k_dev_rx_queue_stop,
	.tx_queue_start = fm10k_dev_tx_queue_start,
	.tx_queue_stop = fm10k_dev_tx_queue_stop,
	.rx_queue_setup = fm10k_rx_queue_setup,
	.rx_queue_release = fm10k_rx_queue_release,
	.tx_queue_setup = fm10k_tx_queue_setup,
	.tx_queue_release = fm10k_tx_queue_release,
	.reta_update = fm10k_reta_update,
	.reta_query = fm10k_reta_query,
	.rss_hash_update = fm10k_rss_hash_update,
	.rss_hash_conf_get = fm10k_rss_hash_conf_get,
};

static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;

	if (dev->data->scattered_rx)
		dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;

	/* only initialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = dev->pci_dev->id.device_id;
	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/*
	 * Initialize bus info. Normally we would call fm10k_get_bus_info(), but
	 * there is no way to get link status without reading BAR4. Until this
	 * works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);
	if (diag != FM10K_SUCCESS) {
		/*
		 * TODO: remove special handling on VF. Need shared code to
		 * fix first.
		 */
		if (hw->mac.type == fm10k_mac_pf) {
			PMD_INIT_LOG(ERR, "Read MAC addr failed: %d", diag);
			return -EIO;
		} else {
			/* Generate a random addr */
			eth_random_addr(hw->mac.addr);
			memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		}
	}

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return diag;
	}

	/* PF/VF have different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* register callback func to eal lib */
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_pf, (void *)dev);

		/* enable MISC interrupt */
		fm10k_dev_enable_intr_pf(dev);
	} else { /* VF */
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);

		fm10k_dev_enable_intr_vf(dev);
	}

	/*
	 * The calls below will trigger operations on the mailbox, so acquire
	 * the lock to avoid a race condition with the interrupt handler.
	 * Operations on the mailbox FIFO will trigger an interrupt to the
	 * PF/SM, whose interrupt handler will handle it and generate an
	 * interrupt to our side. Then, the FIFO in the mailbox will be
	 * touched.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, 0, 0, 1);

	/* Update default vlan */
	hw->mac.ops.update_vlan(hw, hw->mac.default_vid, 0, true);

	/*
	 * Add default mac/vlan filter. glort is assigned by SM for PF, while
	 * it is unused for VF. PF will assign the correct glort for VF.
	 */
	hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
			      hw->mac.default_vid, 1, 0);

	/* Set unicast mode by default. Applications can change it to another
	 * mode through other API functions later.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
					FM10K_XCAST_MODE_NONE);

	fm10k_mbx_unlock(hw);

	/* enable uio intr after callback registered */
	rte_intr_enable(&(dev->pci_dev->intr_handle));

	return 0;
}

/*
 * The set of PCI devices this driver supports. This driver will enable both PF
 * and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};

static struct eth_driver rte_pmd_fm10k = {
	.pci_drv = {
		.name = "rte_pmd_fm10k",
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.dev_private_size = sizeof(struct fm10k_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_pmd_fm10k);
	return 0;
}

static struct rte_driver rte_fm10k_driver = {
	.type = PMD_PDEV,
	.init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);