/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"
#define FM10K_RX_BUFF_ALIGN 512
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
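
/*
 * For example: sizeof(uint32_t) == 4, so CHARS_PER_UINT32 evaluates to 4
 * and BIT_MASK_PER_UINT32 to 0xF. Each 32-bit RETA register holds four
 * one-byte entries, and the 4-bit mask selects which of those entries to
 * update in fm10k_reta_update()/fm10k_reta_query() below.
 */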
static void fm10k_close_mbx_service(struct fm10k_hw *hw);

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
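
/*
 * Typical usage (illustrative sketch, not an exhaustive rule): every
 * mailbox transaction is bracketed by the helpers above so the interrupt
 * handler cannot touch the mailbox FIFO concurrently, e.g.
 *
 *	fm10k_mbx_lock(hw);
 *	hw->mac.ops.update_vlan(hw, vid, 0, true);
 *	fm10k_mbx_unlock(hw);
 */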
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
	uint64_t dma_addr;
	int i, diag;
	PMD_INIT_FUNC_TRACE();

	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
	if (diag != 0)
		return -ENOMEM;

	for (i = 0; i < q->nb_desc; ++i) {
		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
						q->nb_desc);
			return -EINVAL;
		}
		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
		q->hw_ring[i].q.pkt_addr = dma_addr;
		q->hw_ring[i].q.hdr_addr = dma_addr;
	}

	q->next_dd = 0;
	q->next_alloc = 0;
	q->next_trigger = q->alloc_thresh - 1;
	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
	return 0;
}
/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
	uint32_t i;
	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}
/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
		if (q->sw_ring)
			rte_free(q->sw_ring);
		rte_free(q);
	}
}
/*
 * disable RX queue, wait until HW finishes necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
			reg & ~FM10K_RXQCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		/* poll the queue being disabled, not register "i" */
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
		if (!(reg & FM10K_RXQCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	q->last_free = 0;
	q->next_free = 0;
	q->nb_used = 0;
	q->nb_free = q->nb_desc - 1;
	q->free_trigger = q->nb_free - q->free_thresh;
	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}
/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
	uint32_t i;
	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}
/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
		if (q->rs_tracker.list)
			rte_free(q->rs_tracker.list);
		if (q->sw_ring)
			rte_free(q->sw_ring);
		rte_free(q);
	}
}
/*
 * disable TX queue, wait until HW finishes necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
			reg & ~FM10K_TXDCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		/* poll the queue being disabled, not register "i" */
		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
		if (!(reg & FM10K_TXDCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
		PMD_INIT_LOG(WARNING, "fm10k always strips CRC");

	return 0;
}
static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint32_t mrqc, *key, i, reta, j;
	uint64_t hf;

#define RSS_KEY_SIZE 40
	static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
		0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
		0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
	};

	if (dev->data->nb_rx_queues == 1 ||
	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
	    dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
		return;

	/* random key is rss_intel_key (default) or user provided (rss_key) */
	if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
		key = (uint32_t *)rss_intel_key;
	else
		key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

	/* Now fill our hash function seeds, 4 bytes at a time */
	for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
		FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

	/*
	 * Fill in redirection table
	 * The byte-swap is needed because NIC registers are in
	 * little-endian order.
	 */
	reta = 0;
	for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
		if (j == dev->data->nb_rx_queues)
			j = 0;
		reta = (reta << CHAR_BIT) | j;
		if ((i & 3) == 3)
			FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
					reta);
	}
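
	/*
	 * Worked example of the packing above (illustrative): with four Rx
	 * queues, j cycles 0,1,2,3 and each iteration shifts the previous
	 * bytes left, so RETA register 0 ends up holding 0x00010203 --
	 * entry 0 in the most-significant byte. One register is written per
	 * four table entries, hence the (i & 3) == 3 check and the i >> 2
	 * register index.
	 */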

	/*
	 * Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
	mrqc = 0;
	mrqc |= (hf & ETH_RSS_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
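
	/*
	 * For example (illustrative): rss_hf = ETH_RSS_IPV4 |
	 * ETH_RSS_IPV4_TCP yields mrqc = FM10K_MRQC_IPV4 |
	 * FM10K_MRQC_TCP_IPV4, enabling hashing on IPv4 addresses alone and
	 * on IPv4 TCP 4-tuples.
	 */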

	if (mrqc == 0) {
		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
				"supported", hf);
		return;
	}

	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}
static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, ret;
	struct fm10k_tx_queue *txq;
	uint64_t base_addr;
	uint32_t size;

	/* Disable TXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_TXINT(i),
				3 << FM10K_TXINT_TIMER_SHIFT);

	/* Setup TX queue */
	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
		txq = dev->data->tx_queues[i];
		base_addr = txq->hw_ring_phys_addr;
		size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

		/* disable queue to avoid issues while updating state */
		ret = tx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* set location and size for descriptor ring */
		FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
	}
	return 0;
}
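
/*
 * Worked example for the descriptor-base split above (illustrative): a
 * 64-bit ring address of 0x0000000123456000 is programmed as
 * TDBAL = 0x23456000 (low 32 bits via UINT64_LOWER_32BITS_MASK) and
 * TDBAH = 0x1 (high 32 bits via the shift by CHAR_BIT * sizeof(uint32_t),
 * i.e. 32). The Rx ring registers below are split the same way.
 */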
static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, ret;
	struct fm10k_rx_queue *rxq;
	uint64_t base_addr;
	uint32_t size;
	uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	uint16_t buf_size;
	struct rte_pktmbuf_pool_private *mbp_priv;

	/* Disable RXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_RXINT(i),
				3 << FM10K_RXINT_TIMER_SHIFT);

	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		rxq = dev->data->rx_queues[i];
		base_addr = rxq->hw_ring_phys_addr;
		size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

		/* disable queue to avoid issues while updating state */
		ret = rx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* Setup the Base and Length of the Rx Descriptor Ring */
		FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

		/* Configure the Rx buffer size for one buff without split */
		mbp_priv = rte_mempool_get_priv(rxq->mp);
		buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
					RTE_PKTMBUF_HEADROOM);
		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
				buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

		/* Enable drop on empty, it's RO for VF */
		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
		FM10K_WRITE_FLUSH(hw);
	}

	/* Configure RSS if applicable */
	fm10k_dev_mq_rx_configure(dev);
	return 0;
}
static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = -1;
	uint32_t reg;
	struct fm10k_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];
		err = rx_queue_reset(rxq);
		if (err == -ENOMEM) {
			PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
			return err;
		} else if (err == -EINVAL) {
			PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
				" %d", err);
			return err;
		}

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled on real
		 * hardware, but BEFORE the queue is enabled when using the
		 * emulation platform. Do it in both places for now and remove
		 * this comment and the following two register writes when the
		 * emulation platform is no longer being used.
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

		/* Set PF ownership flag for PF devices */
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
		if (hw->mac.type == fm10k_mac_pf)
			reg |= FM10K_RXQCTL_PF;
		reg |= FM10K_RXQCTL_ENABLE;
		/* enable RX queue */
		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
		FM10K_WRITE_FLUSH(hw);

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
	}

	return err;
}
static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		/* Disable RX queue */
		rx_queue_disable(hw, rx_queue_id);

		/* Free mbuf and clean HW ring */
		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
	}

	return 0;
}
static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_reset(dev->data->tx_queues[tx_queue_id]);

		/* reset head and tail pointers */
		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

		/* enable TX queue */
		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
				FM10K_TXDCTL_ENABLE | txdctl);
		FM10K_WRITE_FLUSH(hw);
	} else
		err = -1;

	return err;
}
static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_disable(hw, tx_queue_id);
		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
	}

	return 0;
}
/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
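
/*
 * For example: fls(0) == 0, fls(1) == 1, fls(8) == 4 and
 * fls(0x80000000) == 32. fm10k_dev_start() below uses
 * fls(nb_rx_queues - 1) as the DGLORTDEC RSS length, i.e. the number of
 * bits needed to index the configured Rx queues.
 */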
#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, diag;

	PMD_INIT_FUNC_TRACE();

	/* stop, init, then start the hw */
	diag = fm10k_stop_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_start_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_dev_tx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
		return diag;
	}

	diag = fm10k_dev_rx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
		return diag;
	}

	if (hw->mac.type == fm10k_mac_pf) {
		/* Establish only VSI 0 as valid */
		FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);

		/* Configure RSS bits used in RETA table */
		FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
				fls(dev->data->nb_rx_queues - 1) <<
				FM10K_DGLORTDEC_RSSLENGTH_SHIFT);

		/* Invalidate all other GLORT entries */
		for (i = 1; i < FM10K_DGLORT_COUNT; i++)
			FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
					FM10K_DGLORTMAP_NONE);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq;
		rxq = dev->data->rx_queues[i];

		if (rxq->rx_deferred_start)
			continue;
		diag = fm10k_dev_rx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < i; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct fm10k_tx_queue *txq;
		txq = dev->data->tx_queues[i];

		if (txq->tx_deferred_start)
			continue;
		diag = fm10k_dev_tx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < dev->data->nb_rx_queues; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

	return 0;
}
static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		fm10k_dev_tx_queue_stop(dev, i);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		fm10k_dev_rx_queue_stop(dev, i);
}
static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Stop mailbox service first */
	fm10k_close_mbx_service(hw);
	fm10k_dev_stop(dev);
}
static int
fm10k_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();

	/* The host-interface link is always up. The speed is ~50Gbps per Gen3
	 * x8 PCIe interface. For now, we leave the speed undefined since there
	 * is no 50Gbps Ethernet.
	 */
	dev->data->dev_link.link_speed = 0;
	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	dev->data->dev_link.link_status = 1;

	return 0;
}
static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint64_t ipackets, opackets, ibytes, obytes;
	struct fm10k_hw *hw =
		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	fm10k_update_hw_stats(hw, hw_stats);

	ipackets = opackets = ibytes = obytes = 0;
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < FM10K_MAX_QUEUES_PF); ++i) {
		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
		stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
		stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
		ipackets += stats->q_ipackets[i];
		opackets += stats->q_opackets[i];
		ibytes += stats->q_ibytes[i];
		obytes += stats->q_obytes[i];
	}
	stats->ipackets = ipackets;
	stats->opackets = opackets;
	stats->ibytes = ibytes;
	stats->obytes = obytes;
}
static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	memset(hw_stats, 0, sizeof(*hw_stats));
	fm10k_rebind_hw_stats(hw, hw_stats);
}
static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
	dev_info->max_rx_queues = hw->mac.max_queues;
	dev_info->max_tx_queues = hw->mac.max_queues;
	dev_info->max_mac_addrs = 1;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = FM10K_MAX_VF_NUM;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa = 0;
	dev_info->reta_size = FM10K_MAX_RSS_INDICES;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = FM10K_DEFAULT_RX_PTHRESH,
			.hthresh = FM10K_DEFAULT_RX_HTHRESH,
			.wthresh = FM10K_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = FM10K_DEFAULT_TX_PTHRESH,
			.hthresh = FM10K_DEFAULT_TX_HTHRESH,
			.wthresh = FM10K_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}
static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
	if ((request < min) || (request > max) || ((request % mult) != 0))
		return -1;
	else
		return 0;
}
/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
	uint8_t port_id, uint16_t queue_id, int socket_id,
	uint32_t size, uint32_t align)
{
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
		 driver_name, ring_name, port_id, queue_id, socket_id);

	/* return the memzone if it already exists */
	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
					   RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
	if ((request < min) || (request > max) || ((div % request) != 0))
		return -1;
	else
		return 0;
}
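
/*
 * For example (illustrative, assuming the FM10K_*_THRESH_DIV macros
 * evaluate to the ring size): with 128 descriptors, a requested threshold
 * of 32 passes (128 % 32 == 0) while 48 is rejected, since the threshold
 * must divide the ring evenly for the trigger arithmetic to wrap cleanly.
 */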
static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
	uint16_t rx_free_thresh;

	if (conf->rx_free_thresh == 0)
		rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
	else
		rx_free_thresh = conf->rx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_DIV(q),
			rx_free_thresh)) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->alloc_thresh = rx_free_thresh;
	q->drop_en = conf->rx_drop_en;
	q->rx_deferred_start = conf->rx_deferred_start;

	return 0;
}
/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B. The mempool element size is checked here
 * to make sure a maximally sized Ethernet frame can still be wholly
 * contained within the buffer after 512B alignment.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
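/*
 * Worked example (illustrative): if elt_size leaves 2048B after the mbuf
 * header and headroom, min_size = 2048 - FM10K_RX_BUFF_ALIGN = 1536,
 * which still holds an ETHER_MAX_VLAN_FRAME_LEN (1522B) frame, so the
 * check passes; 1664B would leave only 1152B after alignment and fail.
 */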
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
	uint32_t min_size;

	/* elt_size includes mbuf header and headroom */
	min_size = mp->elt_size - sizeof(struct rte_mbuf) -
			RTE_PKTMBUF_HEADROOM;

	/* account for up to 512B of alignment */
	min_size -= FM10K_RX_BUFF_ALIGN;

	/* sanity check for overflow */
	if (min_size > mp->elt_size)
		return 0;

	if (min_size < ETHER_MAX_VLAN_FRAME_LEN)
		return 0;

	/* size is valid */
	return 1;
}
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_rx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure the mempool element size can account for alignment. */
	if (!mempool_element_size_valid(mp)) {
		PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
		return (-EINVAL);
	}

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
				FM10K_MULT_RX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_RX_DESC,
			FM10K_MIN_RX_DESC, FM10K_MULT_RX_DESC);
		return (-EINVAL);
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * different socket than was previously used.
	 */
	if (dev->data->rx_queues[queue_id] != NULL) {
		rx_queue_free(dev->data->rx_queues[queue_id]);
		dev->data->rx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return (-ENOMEM);
	}

	/* setup queue */
	q->mp = mp;
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
	if (handle_rxconf(q, conf))
		return (-EINVAL);

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return (-ENOMEM);
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return (-ENOMEM);
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	dev->data->rx_queues[queue_id] = q;
	return 0;
}
static void
fm10k_rx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	rx_queue_free(queue);
}
static inline int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
	uint16_t tx_free_thresh;
	uint16_t tx_rs_thresh;

	/* constraint MACROs require that tx_free_thresh is configured
	 * before tx_rs_thresh */
	if (conf->tx_free_thresh == 0)
		tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
	else
		tx_free_thresh = conf->tx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_DIV(q),
			tx_free_thresh)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->free_thresh = tx_free_thresh;

	if (conf->tx_rs_thresh == 0)
		tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
	else
		tx_rs_thresh = conf->tx_rs_thresh;

	q->tx_deferred_start = conf->tx_deferred_start;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_DIV(q),
			tx_rs_thresh)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->rs_thresh = tx_rs_thresh;

	return 0;
}
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_txconf *conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_tx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
				FM10K_MULT_TX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_TX_DESC,
			FM10K_MIN_TX_DESC, FM10K_MULT_TX_DESC);
		return (-EINVAL);
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * different socket than was previously used.
	 */
	if (dev->data->tx_queues[queue_id] != NULL) {
		tx_queue_free(dev->data->tx_queues[queue_id]);
		dev->data->tx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return (-ENOMEM);
	}

	/* setup queue */
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
	if (handle_txconf(q, conf))
		return (-EINVAL);

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return (-ENOMEM);
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return (-ENOMEM);
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	/*
	 * allocate memory for the RS bit tracker. Enough slots to hold the
	 * descriptor index for each RS bit needing to be set are required.
	 */
	q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
				((nb_desc + 1) / q->rs_thresh) *
				sizeof(uint16_t),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (q->rs_tracker.list == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
		rte_free(q->sw_ring);
		rte_free(q);
		return (-ENOMEM);
	}

	dev->data->tx_queues[queue_id] = q;
	return 0;
}
static void
fm10k_tx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	tx_queue_free(queue);
}
static int
fm10k_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size > FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Update Redirection Table RETA[n], n=0..31. The redirection table
	 * has 128 entries in 32 registers
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		/* only read back the register when part of it is preserved */
		reta = 0;
		if (mask != BIT_MASK_PER_UINT32)
			reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));

		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j)) {
				reta &= ~(UINT8_MAX << CHAR_BIT * j);
				reta |= reta_conf[idx].reta[shift + j] <<
						(CHAR_BIT * j);
			}
		}
		FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
	}

	return 0;
}
static int
fm10k_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size < FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Read Redirection Table RETA[n], n=0..31. The redirection table
	 * has 128 entries in 32 registers
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] = ((reta >>
					CHAR_BIT * j) & UINT8_MAX);
		}
	}

	return 0;
}
static int
fm10k_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t hf = rss_conf->rss_hf;
	int i;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
		FM10K_RSSRK_ENTRIES_PER_REG)
		return -EINVAL;

	mrqc = 0;
	mrqc |= (hf & ETH_RSS_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;

	/* If the mapping doesn't fit any supported mode, return */
	if (mrqc == 0)
		return -EINVAL;

	if (key != NULL)
		for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
			FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);

	return 0;
}
static int
fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t hf;
	int i;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
		FM10K_RSSRK_ENTRIES_PER_REG)
		return -EINVAL;

	if (key != NULL)
		for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
			key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));

	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
	hf = 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_IPV4_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_IPV4_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;

	rss_conf->rss_hf = hf;

	return 0;
}
/* Mailbox message handler in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
/* Mailbox message handler in PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace default message handler with new ones */
	if (hw->mac.type == fm10k_mac_pf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
	else
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler. err: %d",
				err);
		return err;
	}

	/* Connect to SM for PF device or PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}
static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
	/* Disconnect from SM for PF device or PF for VF device */
	hw->mbx.ops.disconnect(hw, &hw->mbx);
}
static struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure = fm10k_dev_configure,
	.dev_start = fm10k_dev_start,
	.dev_stop = fm10k_dev_stop,
	.dev_close = fm10k_dev_close,
	.stats_get = fm10k_stats_get,
	.stats_reset = fm10k_stats_reset,
	.link_update = fm10k_link_update,
	.dev_infos_get = fm10k_dev_infos_get,
	.rx_queue_start = fm10k_dev_rx_queue_start,
	.rx_queue_stop = fm10k_dev_rx_queue_stop,
	.tx_queue_start = fm10k_dev_tx_queue_start,
	.tx_queue_stop = fm10k_dev_tx_queue_stop,
	.rx_queue_setup = fm10k_rx_queue_setup,
	.rx_queue_release = fm10k_rx_queue_release,
	.tx_queue_setup = fm10k_tx_queue_setup,
	.tx_queue_release = fm10k_tx_queue_release,
	.reta_update = fm10k_reta_update,
	.reta_query = fm10k_reta_query,
	.rss_hash_update = fm10k_rss_hash_update,
	.rss_hash_conf_get = fm10k_rss_hash_conf_get,
};
static int
eth_fm10k_dev_init(__rte_unused struct eth_driver *eth_drv,
	struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;

	/* only initialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = dev->pci_dev->id.device_id;
	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/*
	 * Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4. Until
	 * this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);
	if (diag != FM10K_SUCCESS) {
		/*
		 * TODO: remove special handling on VF. Need shared code to
		 * fix first.
		 */
		if (hw->mac.type == fm10k_mac_pf) {
			PMD_INIT_LOG(ERR, "Read MAC addr failed: %d", diag);
			return -EIO;
		}
		/* Generate a random addr */
		eth_random_addr(hw->mac.addr);
		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
	}

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return diag;
	}

	/*
	 * The functions below trigger operations on the mailbox, so acquire
	 * the lock to avoid a race condition with the interrupt handler:
	 * operations on the mailbox FIFO raise an interrupt to the PF/SM,
	 * whose handler responds with an interrupt to our side, at which
	 * point the mailbox FIFO is touched again.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, 0, 0, 1);

	/* Update default vlan */
	hw->mac.ops.update_vlan(hw, hw->mac.default_vid, 0, true);

	/*
	 * Add default mac/vlan filter. glort is assigned by SM for PF, while
	 * it is unused for VF. PF will assign the correct glort for VF.
	 */
	hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
			hw->mac.default_vid, 1, 0);

	/* Set unicast mode by default. App can change to other mode in other
	 * API func.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
					FM10K_XCAST_MODE_MULTI);

	fm10k_mbx_unlock(hw);

	return 0;
}
/*
 * The set of PCI devices this driver supports. This driver will enable both
 * PF and SRIOV-VF devices.
 */
static struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
static struct eth_driver rte_pmd_fm10k = {
	{
		.name = "rte_pmd_fm10k",
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.dev_private_size = sizeof(struct fm10k_adapter),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_pmd_fm10k);
	return 0;
}
static struct rte_driver rte_fm10k_driver = {
	.type = PMD_PDEV,
	.init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);