/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"
#define FM10K_RX_BUFF_ALIGN 512
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
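
/*
 * Illustration (not from the original sources): CHARS_PER_UINT32 is 4 and
 * BIT_MASK_PER_UINT32 is 0xF, so each 32-bit RETA register packs four
 * one-byte queue indexes; that is how the 128-entry redirection table fits
 * in 32 registers (see fm10k_reta_update()/fm10k_reta_query() below).
 */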
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}
static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
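
/*
 * Note: these helpers spin on rte_spinlock_trylock() with a 20us delay
 * instead of blocking, presumably so a thread contending with the
 * interrupt-context mailbox handler (see eth_fm10k_dev_init()) keeps
 * polling rather than sleeping on a lock the handler may hold.
 */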
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
	uint64_t dma_addr;
	int i, diag;

	PMD_INIT_FUNC_TRACE();

	/* take buffers for the whole ring from the pool in one shot */
	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
	if (diag != 0)
		return -ENOMEM;

	for (i = 0; i < q->nb_desc; ++i) {
		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
						q->nb_desc);
			return -EINVAL;
		}
		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
		q->hw_ring[i].q.pkt_addr = dma_addr;
		q->hw_ring[i].q.hdr_addr = dma_addr;
	}

	q->next_trigger = q->alloc_thresh - 1;
	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
	return 0;
}
/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}
/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
		/* free mbufs and descriptor state before the rings */
		rx_queue_clean(q);
		if (q->sw_ring)
			rte_free(q->sw_ring);
		rte_free(q);
	}
}
/*
 * disable RX queue, wait until HW finished necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
			reg & ~FM10K_RXQCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
		if (!(reg & FM10K_RXQCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	q->nb_free = q->nb_desc - 1;
	q->free_trigger = q->nb_free - q->free_thresh;
	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}
/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}
/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
		/* free mbufs and descriptor state before the rings */
		tx_queue_clean(q);
		if (q->rs_tracker.list)
			rte_free(q->rs_tracker.list);
		if (q->sw_ring)
			rte_free(q->sw_ring);
		rte_free(q);
	}
}
/*
 * disable TX queue, wait until HW finished necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
			reg & ~FM10K_TXDCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
		if (!(reg & FM10K_TXDCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
		PMD_INIT_LOG(WARNING, "fm10k always strips CRC");

	return 0;
}
static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_tx_queue *txq;
	uint64_t base_addr;
	uint32_t size;
	int i, ret = 0;

	/* Disable TXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_TXINT(i),
				3 << FM10K_TXINT_TIMER_SHIFT);

	/* Setup TX queues */
	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
		txq = dev->data->tx_queues[i];
		base_addr = txq->hw_ring_phys_addr;
		size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

		/* disable queue to avoid issues while updating state */
		ret = tx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* set location and size for descriptor ring */
		FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
	}

	return ret;
}
static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_rx_queue *rxq;
	uint64_t base_addr;
	uint32_t size;
	uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	uint16_t buf_size;
	struct rte_pktmbuf_pool_private *mbp_priv;
	int i, ret = 0;

	/* Disable RXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_RXINT(i),
				3 << FM10K_RXINT_TIMER_SHIFT);

	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		rxq = dev->data->rx_queues[i];
		base_addr = rxq->hw_ring_phys_addr;
		size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

		/* disable queue to avoid issues while updating state */
		ret = rx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* Setup the Base and Length of the Rx Descriptor Ring */
		FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

		/* Configure the Rx buffer size for one buff without split */
		mbp_priv = rte_mempool_get_priv(rxq->mp);
		buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
					RTE_PKTMBUF_HEADROOM);
		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
				buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

		/* Enable drop on empty, it's RO for VF; rebuild the register
		 * value per queue so one queue's flag does not leak into the
		 * next */
		rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
		FM10K_WRITE_FLUSH(hw);
	}

	return ret;
}
static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_rx_queue *rxq;
	uint32_t reg;
	int err = -1;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];
		err = rx_queue_reset(rxq);
		if (err == -ENOMEM) {
			PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
			return err;
		} else if (err == -EINVAL) {
			PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
				" %d", err);
			return err;
		}

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled on real
		 * hardware, but BEFORE the queue is enabled when using the
		 * emulation platform. Do it in both places for now and remove
		 * this comment and the following two register writes when the
		 * emulation platform is no longer being used.
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

		/* Set PF ownership flag for PF devices */
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
		if (hw->mac.type == fm10k_mac_pf)
			reg |= FM10K_RXQCTL_PF;
		reg |= FM10K_RXQCTL_ENABLE;
		/* enable RX queue */
		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
		FM10K_WRITE_FLUSH(hw);

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
	}

	return err;
}
static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		/* Disable RX queue */
		rx_queue_disable(hw, rx_queue_id);

		/* Free mbuf and clean HW ring */
		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
	}

	return 0;
}
static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_reset(dev->data->tx_queues[tx_queue_id]);

		/* reset head and tail pointers */
		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

		/* enable TX queue */
		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
					FM10K_TXDCTL_ENABLE | txdctl);
		FM10K_WRITE_FLUSH(hw);
	} else {
		err = -1;
	}

	return err;
}
static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_disable(hw, tx_queue_id);
		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
	}

	return 0;
}
/* fls = find last set bit = 32 minus the number of leading zeros */
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
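
/*
 * Worked examples (illustrative, not from the original sources):
 * fls(0) == 0, fls(1) == 1, fls(7) == 3, fls(8) == 4. fm10k_dev_start()
 * below uses fls(nb_rx_queues - 1) as ceil(log2(nb_rx_queues)) when it
 * programs the RSS length field of DGLORTDEC. BSIZEPKT_ROUNDUP is the
 * round-up term for buffer sizes expressed in units of
 * (1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) bytes.
 */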
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, diag;

	PMD_INIT_FUNC_TRACE();

	/* stop, init, then start the hw */
	diag = fm10k_stop_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_start_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_dev_tx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
		return diag;
	}

	diag = fm10k_dev_rx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
		return diag;
	}

	if (hw->mac.type == fm10k_mac_pf) {
		/* Establish only VSI 0 as valid */
		FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);

		/* Configure RSS bits used in RETA table */
		FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
				fls(dev->data->nb_rx_queues - 1) <<
				FM10K_DGLORTDEC_RSSLENGTH_SHIFT);

		/* Invalidate all other GLORT entries */
		for (i = 1; i < FM10K_DGLORT_COUNT; i++)
			FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
					FM10K_DGLORTMAP_NONE);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq;
		rxq = dev->data->rx_queues[i];

		if (rxq->rx_deferred_start)
			continue;
		diag = fm10k_dev_rx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < i; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct fm10k_tx_queue *txq;
		txq = dev->data->tx_queues[i];

		if (txq->tx_deferred_start)
			continue;
		diag = fm10k_dev_tx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < dev->data->nb_rx_queues; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

	return 0;
}
static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		fm10k_dev_tx_queue_stop(dev, i);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		fm10k_dev_rx_queue_stop(dev, i);
}
static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Stop mailbox service first, then shut the queues down */
	fm10k_close_mbx_service(hw);
	fm10k_dev_stop(dev);
}
static int
fm10k_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();

	/* The host-interface link is always up. The speed is ~50Gbps per Gen3
	 * x8 PCIe interface. For now, we leave the speed undefined since there
	 * is no 50Gbps Ethernet.
	 */
	dev->data->dev_link.link_speed = 0;
	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	dev->data->dev_link.link_status = 1;

	return 0;
}
static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint64_t ipackets, opackets, ibytes, obytes;
	struct fm10k_hw *hw =
		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	fm10k_update_hw_stats(hw, hw_stats);

	/* accumulate per-queue counters into the port totals */
	ipackets = opackets = ibytes = obytes = 0;
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < FM10K_MAX_QUEUES_PF); ++i) {
		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
		stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
		stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
		ipackets += stats->q_ipackets[i];
		opackets += stats->q_opackets[i];
		ibytes += stats->q_ibytes[i];
		obytes += stats->q_obytes[i];
	}
	stats->ipackets = ipackets;
	stats->opackets = opackets;
	stats->ibytes = ibytes;
	stats->obytes = obytes;
}
static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	memset(hw_stats, 0, sizeof(*hw_stats));
	fm10k_rebind_hw_stats(hw, hw_stats);
}
static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
	dev_info->max_rx_queues = hw->mac.max_queues;
	dev_info->max_tx_queues = hw->mac.max_queues;
	dev_info->max_mac_addrs = 1;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = FM10K_MAX_VF_NUM;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa = 0;
	dev_info->reta_size = FM10K_MAX_RSS_INDICES;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = FM10K_DEFAULT_RX_PTHRESH,
			.hthresh = FM10K_DEFAULT_RX_HTHRESH,
			.wthresh = FM10K_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = FM10K_DEFAULT_TX_PTHRESH,
			.hthresh = FM10K_DEFAULT_TX_HTHRESH,
			.wthresh = FM10K_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}
static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
	if ((request < min) || (request > max) || ((request % mult) != 0))
		return -1;

	return 0;
}
/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
	uint8_t port_id, uint16_t queue_id, int socket_id,
	uint32_t size, uint32_t align)
{
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
		 driver_name, ring_name, port_id, queue_id, socket_id);

	/* return the memzone if it already exists */
	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
					   RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
	if ((request < min) || (request > max) || ((div % request) != 0))
		return -1;

	return 0;
}
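
/*
 * Example (illustrative; assumes the *_THRESH_DIV macros expand to the
 * ring size): with div == 512, requests of 32 or 64 pass because they
 * divide 512 evenly and fall within [min, max], while a request of 100
 * fails since 512 % 100 != 0.
 */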
static int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
	uint16_t rx_free_thresh;

	if (conf->rx_free_thresh == 0)
		rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
	else
		rx_free_thresh = conf->rx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_DIV(q),
			rx_free_thresh)) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_DIV(q));
		return -EINVAL;
	}

	q->alloc_thresh = rx_free_thresh;
	q->drop_en = conf->rx_drop_en;
	q->rx_deferred_start = conf->rx_deferred_start;

	return 0;
}
/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B. The mempool element size is checked here
 * to make sure a maximally sized Ethernet frame can still be wholly
 * contained within the buffer after 512B alignment.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
	uint32_t min_size;

	/* elt_size includes mbuf header and headroom */
	min_size = mp->elt_size - sizeof(struct rte_mbuf) -
			RTE_PKTMBUF_HEADROOM;

	/* account for up to 512B of alignment */
	min_size -= FM10K_RX_BUFF_ALIGN;

	/* sanity check: unsigned underflow wraps above elt_size */
	if (min_size > mp->elt_size)
		return 0;

	if (min_size < ETHER_MAX_VLAN_FRAME_LEN)
		return 0;

	/* size is valid */
	return 1;
}
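
/*
 * Worked example (illustrative, assuming the common defaults of a 128-byte
 * RTE_PKTMBUF_HEADROOM and ETHER_MAX_VLAN_FRAME_LEN == 1522): a pool must
 * be created with elt_size >= sizeof(struct rte_mbuf) + 128 + 512 + 1522
 * for this check to pass, because up to FM10K_RX_BUFF_ALIGN bytes of the
 * data room can be lost to DMA address alignment.
 */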
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_rx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure the mempool element size can account for alignment. */
	if (!mempool_element_size_valid(mp)) {
		PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
		return -EINVAL;
	}

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
				FM10K_MULT_RX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_RX_DESC,
			FM10K_MIN_RX_DESC, FM10K_MULT_RX_DESC);
		return -EINVAL;
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * a different socket than was previously used.
	 */
	if (dev->data->rx_queues[queue_id] != NULL) {
		rx_queue_free(dev->data->rx_queues[queue_id]);
		dev->data->rx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	/* setup queue */
	q->mp = mp;
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
	if (handle_rxconf(q, conf)) {
		rte_free(q);
		return -EINVAL;
	}

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return -ENOMEM;
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return -ENOMEM;
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	dev->data->rx_queues[queue_id] = q;
	return 0;
}
static void
fm10k_rx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	rx_queue_free(queue);
}
static int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
	uint16_t tx_free_thresh;
	uint16_t tx_rs_thresh;

	/* constraint MACROs require that tx_free_thresh is configured
	 * before tx_rs_thresh */
	if (conf->tx_free_thresh == 0)
		tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
	else
		tx_free_thresh = conf->tx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_DIV(q),
			tx_free_thresh)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_DIV(q));
		return -EINVAL;
	}

	q->free_thresh = tx_free_thresh;

	if (conf->tx_rs_thresh == 0)
		tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
	else
		tx_rs_thresh = conf->tx_rs_thresh;

	q->tx_deferred_start = conf->tx_deferred_start;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_DIV(q),
			tx_rs_thresh)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_DIV(q));
		return -EINVAL;
	}

	q->rs_thresh = tx_rs_thresh;

	return 0;
}
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_txconf *conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_tx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
				FM10K_MULT_TX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_TX_DESC,
			FM10K_MIN_TX_DESC, FM10K_MULT_TX_DESC);
		return -EINVAL;
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * a different socket than was previously used.
	 */
	if (dev->data->tx_queues[queue_id] != NULL) {
		tx_queue_free(dev->data->tx_queues[queue_id]);
		dev->data->tx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	/* setup queue */
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
	if (handle_txconf(q, conf)) {
		rte_free(q);
		return -EINVAL;
	}

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return -ENOMEM;
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return -ENOMEM;
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	/*
	 * allocate memory for the RS bit tracker. Enough slots to hold the
	 * descriptor index for each RS bit needing to be set are required.
	 */
	q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
				((nb_desc + 1) / q->rs_thresh) *
				sizeof(uint16_t),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (q->rs_tracker.list == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
		rte_free(q->sw_ring);
		rte_free(q);
		return -ENOMEM;
	}

	dev->data->tx_queues[queue_id] = q;
	return 0;
}
static void
fm10k_tx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	tx_queue_free(queue);
}
static int
fm10k_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size > FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of the hash lookup table "
			"configured (%d) doesn't match the number the "
			"hardware can support (%d)",
			reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Update Redirection Table RETA[n], n=0..31. The redirection table has
	 * 128 entries in 32 registers
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		/* if only some entries of this register change, read it
		 * first so the untouched bytes are preserved */
		reta = 0;
		if (mask != BIT_MASK_PER_UINT32)
			reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));

		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j)) {
				reta &= ~((uint32_t)UINT8_MAX <<
						(CHAR_BIT * j));
				reta |= (uint32_t)reta_conf[idx].reta[shift + j]
						<< (CHAR_BIT * j);
			}
		}
		FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
	}

	return 0;
}
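
/*
 * Mapping example (illustrative only, derived from the loop above): RETA
 * entry 6 is byte 2 of register FM10K_RETA(0, 1); the loop reaches it at
 * i == 4, j == 2, its mask bit is bit 6 of reta_conf[0].mask, and its new
 * queue index comes from reta_conf[0].reta[6].
 */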
static int
fm10k_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size < FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of the hash lookup table "
			"configured (%d) doesn't match the number the "
			"hardware can support (%d)",
			reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Read Redirection Table RETA[n], n=0..31. The redirection table has
	 * 128 entries in 32 registers
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] = ((reta >>
					(CHAR_BIT * j)) & UINT8_MAX);
		}
	}

	return 0;
}
/* Mailbox message handler in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
/* Mailbox message handler in PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace default message handlers with new ones */
	if (hw->mac.type == fm10k_mac_pf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
	else
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler. err: %d",
				err);
		return err;
	}

	/* Connect to SM for PF device or to PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}
static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
	/* Disconnect from SM for PF device or from PF for VF device */
	hw->mbx.ops.disconnect(hw, &hw->mbx);
}
static struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure = fm10k_dev_configure,
	.dev_start = fm10k_dev_start,
	.dev_stop = fm10k_dev_stop,
	.dev_close = fm10k_dev_close,
	.stats_get = fm10k_stats_get,
	.stats_reset = fm10k_stats_reset,
	.link_update = fm10k_link_update,
	.dev_infos_get = fm10k_dev_infos_get,
	.rx_queue_start = fm10k_dev_rx_queue_start,
	.rx_queue_stop = fm10k_dev_rx_queue_stop,
	.tx_queue_start = fm10k_dev_tx_queue_start,
	.tx_queue_stop = fm10k_dev_tx_queue_stop,
	.rx_queue_setup = fm10k_rx_queue_setup,
	.rx_queue_release = fm10k_rx_queue_release,
	.tx_queue_setup = fm10k_tx_queue_setup,
	.tx_queue_release = fm10k_tx_queue_release,
	.reta_update = fm10k_reta_update,
	.reta_query = fm10k_reta_query,
};
static int
eth_fm10k_dev_init(__rte_unused struct eth_driver *eth_drv,
	struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;

	/* only initialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = dev->pci_dev->id.device_id;
	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/*
	 * Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4. Until
	 * this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;
	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);
	if (diag != FM10K_SUCCESS) {
		/*
		 * TODO: remove special handling on VF. Need shared code to
		 * fix first.
		 */
		if (hw->mac.type == fm10k_mac_pf) {
			PMD_INIT_LOG(ERR, "Read MAC addr failed: %d", diag);
			return -EIO;
		}

		/* Generate a random addr */
		eth_random_addr(hw->mac.addr);
		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
	}

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return -EIO;
	}

	/*
	 * The calls below trigger operations on the mailbox; acquire the lock
	 * to avoid a race condition with the interrupt handler. Operations on
	 * the mailbox FIFO will trigger an interrupt to the PF/SM, whose
	 * interrupt handler responds with an interrupt to our side, which in
	 * turn touches the mailbox FIFO.
	 */
	fm10k_mbx_lock(hw);

	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, 0, 0, 1);

	/* Update default vlan */
	hw->mac.ops.update_vlan(hw, hw->mac.default_vid, 0, true);

	/*
	 * Add default mac/vlan filter. glort is assigned by SM for PF, while
	 * it is unused for VF. PF will assign the correct glort for VF.
	 */
	hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
			hw->mac.default_vid, 1, 0);

	/* Set multicast mode by default. App can change to another mode in
	 * another API func.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
			FM10K_XCAST_MODE_MULTI);

	fm10k_mbx_unlock(hw);

	return 0;
}
/*
 * The set of PCI devices this driver supports. This driver will enable both PF
 * and SRIOV-VF devices.
 */
static struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
static struct eth_driver rte_pmd_fm10k = {
	.pci_drv = {
		.name = "rte_pmd_fm10k",
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.dev_private_size = sizeof(struct fm10k_adapter),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_pmd_fm10k);
	return 0;
}
static struct rte_driver rte_fm10k_driver = {
	.type = PMD_PDEV,
	.init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);