4 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_memzone.h>
37 #include <rte_string_fns.h>
39 #include <rte_spinlock.h>
42 #include "base/fm10k_api.h"
/* Rx packet buffers must be aligned to 512B (hardware requirement,
 * see mempool_element_size_valid() below). */
44 #define FM10K_RX_BUFF_ALIGN 512
45 /* Default delay to acquire mailbox lock */
46 #define FM10K_MBXLOCK_DELAY_US 20
/* Keeps only the low 32 bits of a 64-bit DMA ring base address. */
47 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
49 /* Number of chars per uint32 type */
50 #define CHARS_PER_UINT32 (sizeof(uint32_t))
/* 4-bit mask: one bit per byte-sized RETA entry packed in a uint32. */
51 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
/* Forward declaration; used by fm10k_dev_close(). */
53 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
/* Initialize the mailbox spinlock stored in the device private data. */
56 fm10k_mbx_initlock(struct fm10k_hw *hw)
58 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
/* Acquire the mailbox lock; spins on trylock, sleeping
 * FM10K_MBXLOCK_DELAY_US between attempts to avoid a hard busy-wait. */
62 fm10k_mbx_lock(struct fm10k_hw *hw)
64 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
65 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
/* Release the mailbox lock taken by fm10k_mbx_lock(). */
69 fm10k_mbx_unlock(struct fm10k_hw *hw)
71 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
75 * reset queue to initial state, allocate software buffers used when starting
78 * return -ENOMEM if buffers cannot be allocated
79 * return -EINVAL if buffers do not satisfy alignment condition
82 rx_queue_reset(struct fm10k_rx_queue *q)
86 PMD_INIT_FUNC_TRACE();
/* Grab one mbuf per descriptor in a single bulk operation. */
88 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
92 for (i = 0; i < q->nb_desc; ++i) {
93 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
/* HW alignment constraint; on violation all buffers go back to the
 * pool and -EINVAL is reported (per the function header above). */
94 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
95 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
/* Both packet and header address point at the same buffer —
 * presumably header split is unused; confirm against full source. */
99 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
100 q->hw_ring[i].q.pkt_addr = dma_addr;
101 q->hw_ring[i].q.hdr_addr = dma_addr;
/* Tail is set to the last descriptor so HW owns the whole ring. */
106 q->next_trigger = q->alloc_thresh - 1;
107 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
112 * clean queue, descriptor rings, free software buffers used when stopping
116 rx_queue_clean(struct fm10k_rx_queue *q)
118 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
120 PMD_INIT_FUNC_TRACE();
122 /* zero descriptor rings */
123 for (i = 0; i < q->nb_desc; ++i)
124 q->hw_ring[i] = zero;
126 /* free software buffers */
127 for (i = 0; i < q->nb_desc; ++i) {
129 rte_pktmbuf_free_seg(q->sw_ring[i]);
/* NULL the slot so a later clean/free pass cannot double-free. */
130 q->sw_ring[i] = NULL;
136 * free all queue memory used when releasing the queue (i.e. configure)
139 rx_queue_free(struct fm10k_rx_queue *q)
141 PMD_INIT_FUNC_TRACE();
143 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
/* Frees the software ring allocated in fm10k_rx_queue_setup().
 * NOTE(review): freeing of the queue struct itself is not visible in
 * this excerpt — confirm against the full source. */
146 rte_free(q->sw_ring);
152 * disable RX queue, wait unitl HW finished necessary flush operation
155 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
159 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
160 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
161 reg & ~FM10K_RXQCTL_ENABLE);
163 /* Wait 100us at most */
164 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
166 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
167 if (!(reg & FM10K_RXQCTL_ENABLE))
171 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
178 * reset queue to initial state, allocate software buffers used when starting
182 tx_queue_reset(struct fm10k_tx_queue *q)
184 PMD_INIT_FUNC_TRACE();
/* One descriptor is reserved: only nb_desc - 1 are usable. */
188 q->nb_free = q->nb_desc - 1;
189 q->free_trigger = q->nb_free - q->free_thresh;
/* The RS-bit tracker needs one slot per rs_thresh-sized burst. */
190 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
191 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
195 * clean queue, descriptor rings, free software buffers used when stopping
199 tx_queue_clean(struct fm10k_tx_queue *q)
201 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
203 PMD_INIT_FUNC_TRACE();
205 /* zero descriptor rings */
206 for (i = 0; i < q->nb_desc; ++i)
207 q->hw_ring[i] = zero;
209 /* free software buffers */
210 for (i = 0; i < q->nb_desc; ++i) {
212 rte_pktmbuf_free_seg(q->sw_ring[i]);
/* NULL the slot so a later clean/free pass cannot double-free. */
213 q->sw_ring[i] = NULL;
219 * free all queue memory used when releasing the queue (i.e. configure)
222 tx_queue_free(struct fm10k_tx_queue *q)
224 PMD_INIT_FUNC_TRACE();
226 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
/* rte_free(NULL) is a no-op, so this guard is redundant but harmless. */
228 if (q->rs_tracker.list)
229 rte_free(q->rs_tracker.list);
231 rte_free(q->sw_ring);
237 * disable TX queue, wait unitl HW finished necessary flush operation
240 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
244 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
245 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
246 reg & ~FM10K_TXDCTL_ENABLE);
248 /* Wait 100us at most */
249 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
251 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
252 if (!(reg & FM10K_TXDCTL_ENABLE))
256 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
/* Device-level configure hook. The hardware always strips the CRC,
 * so only warn if the application asked to keep it. */
263 fm10k_dev_configure(struct rte_eth_dev *dev)
265 PMD_INIT_FUNC_TRACE();
267 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
268 PMD_INIT_LOG(WARNING, "fm10k always strip CRC")
/*
 * Program the RSS key (RSSRK), the redirection table (RETA) and the
 * hash-type selection (MRQC). Left untouched (RSS disabled) when only
 * one Rx queue exists, RSS mq_mode was not requested, or no hash types
 * were selected.
 */
274 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
276 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
277 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
278 uint32_t mrqc, *key, i, reta, j;
281 #define RSS_KEY_SIZE 40
/* Default 40-byte RSS key, the standard key used across Intel PMDs. */
282 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
283 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
284 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
285 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
286 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
287 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
290 if (dev->data->nb_rx_queues == 1 ||
291 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
292 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
295 /* random key is rss_intel_key (default) or user provided (rss_key) */
296 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
297 key = (uint32_t *)rss_intel_key;
299 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
301 /* Now fill our hash function seeds, 4 bytes at a time */
302 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
303 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
306 * Fill in redirection table
307 * The byte-swap is needed because NIC registers are in
308 * little-endian order.
/* j cycles round-robin through 0..nb_rx_queues-1; four 8-bit queue
 * entries are packed into each 32-bit RETA register (i >> 2). */
311 for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
312 if (j == dev->data->nb_rx_queues)
314 reta = (reta << CHAR_BIT) | j;
316 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
321 * Generate RSS hash based on packet types, TCP/UDP
322 * port numbers and/or IPv4/v6 src and dst addresses
324 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
326 mrqc |= (hf & ETH_RSS_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
327 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
328 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
329 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
330 mrqc |= (hf & ETH_RSS_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
331 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
332 mrqc |= (hf & ETH_RSS_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
333 mrqc |= (hf & ETH_RSS_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
334 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
/* A zero mrqc means none of the requested hash types are supported. */
337 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
342 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
/*
 * Device-level Tx init: mask Tx interrupts and program each ring's
 * base address (split into low/high 32-bit registers) and byte length.
 * Queues remain disabled here; enabling happens in tx_queue_start.
 */
346 fm10k_dev_tx_init(struct rte_eth_dev *dev)
348 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
350 struct fm10k_tx_queue *txq;
354 /* Disable TXINT to avoid possible interrupt */
355 for (i = 0; i < hw->mac.max_queues; i++)
356 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
357 3 << FM10K_TXINT_TIMER_SHIFT);
359 /* Setup TX queue */
360 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
361 txq = dev->data->tx_queues[i];
362 base_addr = txq->hw_ring_phys_addr;
363 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
365 /* disable queue to avoid issues while updating state */
366 ret = tx_queue_disable(hw, i);
368 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
372 /* set location and size for descriptor ring */
373 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
374 base_addr & UINT64_LOWER_32BITS_MASK);
375 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
376 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
377 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
/*
 * Device-level Rx init: mask Rx interrupts, program each ring's base
 * address/length, set the per-queue buffer size, enable scattered
 * receive when one buffer cannot hold a max-length frame, and finally
 * configure RSS.
 */
383 fm10k_dev_rx_init(struct rte_eth_dev *dev)
385 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
387 struct fm10k_rx_queue *rxq;
390 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
392 struct rte_pktmbuf_pool_private *mbp_priv;
394 /* Disable RXINT to avoid possible interrupt */
395 for (i = 0; i < hw->mac.max_queues; i++)
396 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
397 3 << FM10K_RXINT_TIMER_SHIFT);
399 /* Setup RX queues */
400 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
401 rxq = dev->data->rx_queues[i];
402 base_addr = rxq->hw_ring_phys_addr;
403 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
405 /* disable queue to avoid issues while updating state */
406 ret = rx_queue_disable(hw, i);
408 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
412 /* Setup the Base and Length of the Rx Descriptor Ring */
413 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
414 base_addr & UINT64_LOWER_32BITS_MASK);
415 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
416 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
417 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
419 /* Configure the Rx buffer size for one buff without split */
/* Usable buffer = data room minus the headroom reserved in each mbuf. */
420 mbp_priv = rte_mempool_get_priv(rxq->mp);
421 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
422 RTE_PKTMBUF_HEADROOM);
423 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
424 buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
426 /* It adds dual VLAN length for supporting dual VLAN */
/* If a max frame (plus two VLAN tags) exceeds one buffer, switch the
 * whole port to the scattered receive path. */
427 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
428 2 * FM10K_VLAN_TAG_SIZE) > buf_size){
429 dev->data->scattered_rx = 1;
430 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
433 /* Enable drop on empty, it's RO for VF */
434 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
435 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
437 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
438 FM10K_WRITE_FLUSH(hw);
/* Application may also force scattered Rx regardless of buffer size. */
441 if (dev->data->dev_conf.rxmode.enable_scatter) {
442 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
443 dev->data->scattered_rx = 1;
446 /* Configure RSS if applicable */
447 fm10k_dev_mq_rx_configure(dev);
/*
 * Start one Rx queue: refill its software/hardware rings via
 * rx_queue_reset(), program head/tail, then set the enable bit in
 * RXQCTL (plus the PF-ownership flag on PF devices).
 */
452 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
454 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
457 struct fm10k_rx_queue *rxq;
459 PMD_INIT_FUNC_TRACE();
461 if (rx_queue_id < dev->data->nb_rx_queues) {
462 rxq = dev->data->rx_queues[rx_queue_id];
463 err = rx_queue_reset(rxq);
464 if (err == -ENOMEM) {
465 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
467 } else if (err == -EINVAL) {
468 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
473 /* Setup the HW Rx Head and Tail Descriptor Pointers
474 * Note: this must be done AFTER the queue is enabled on real
475 * hardware, but BEFORE the queue is enabled when using the
476 * emulation platform. Do it in both places for now and remove
477 * this comment and the following two register writes when the
478 * emulation platform is no longer being used.
480 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
481 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
483 /* Set PF ownership flag for PF devices */
484 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
485 if (hw->mac.type == fm10k_mac_pf)
486 reg |= FM10K_RXQCTL_PF;
487 reg |= FM10K_RXQCTL_ENABLE;
488 /* enable RX queue */
489 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
490 FM10K_WRITE_FLUSH(hw);
492 /* Setup the HW Rx Head and Tail Descriptor Pointers
493 * Note: this must be done AFTER the queue is enabled
495 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
496 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
/* Stop one Rx queue: disable it in hardware, then free the mbufs and
 * zero the descriptor ring. Out-of-range ids are silently ignored. */
503 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
505 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
507 PMD_INIT_FUNC_TRACE();
509 if (rx_queue_id < dev->data->nb_rx_queues) {
510 /* Disable RX queue */
511 rx_queue_disable(hw, rx_queue_id);
513 /* Free mbuf and clean HW ring */
514 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
/*
 * Start one Tx queue: reset its software state, zero the HW head/tail
 * pointers, then enable the queue with the minimum write-back delay.
 */
521 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
523 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
524 /** @todo - this should be defined in the shared code */
525 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
526 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
529 PMD_INIT_FUNC_TRACE();
531 if (tx_queue_id < dev->data->nb_tx_queues) {
532 tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
534 /* reset head and tail pointers */
535 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
536 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
538 /* enable TX queue */
539 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
540 FM10K_TXDCTL_ENABLE | txdctl);
541 FM10K_WRITE_FLUSH(hw);
/* Stop one Tx queue: disable it in hardware, then free mbufs and zero
 * the descriptor ring. Out-of-range ids are silently ignored. */
549 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
551 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
553 PMD_INIT_FUNC_TRACE();
555 if (tx_queue_id < dev->data->nb_tx_queues) {
556 tx_queue_disable(hw, tx_queue_id);
557 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
563 /* fls = find last set bit = 32 minus the number of leading zeros */
/* fls(0) is special-cased because __builtin_clz(0) is undefined. */
565 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
/* Rounding mask matching the SRRCTL buffer-size field granularity. */
567 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
/*
 * Start the device: stop/init/start the MAC, program Tx/Rx rings,
 * set up PF GLORT mapping, then start every queue that is not marked
 * deferred. On queue-start failure, previously started Rx queues are
 * cleaned up before returning the error.
 */
569 fm10k_dev_start(struct rte_eth_dev *dev)
571 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
574 PMD_INIT_FUNC_TRACE();
576 /* stop, init, then start the hw */
577 diag = fm10k_stop_hw(hw);
578 if (diag != FM10K_SUCCESS) {
579 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
583 diag = fm10k_init_hw(hw);
584 if (diag != FM10K_SUCCESS) {
585 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
589 diag = fm10k_start_hw(hw);
590 if (diag != FM10K_SUCCESS) {
591 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
595 diag = fm10k_dev_tx_init(dev);
597 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
601 diag = fm10k_dev_rx_init(dev);
603 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
/* GLORT mapping is a PF-only concept; VFs skip this whole branch. */
607 if (hw->mac.type == fm10k_mac_pf) {
608 /* Establish only VSI 0 as valid */
609 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
611 /* Configure RSS bits used in RETA table */
/* RSS length field is log2 of the Rx queue count (fls of count-1). */
612 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
613 fls(dev->data->nb_rx_queues - 1) <<
614 FM10K_DGLORTDEC_RSSLENGTH_SHIFT);
616 /* Invalidate all other GLORT entries */
617 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
618 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
619 FM10K_DGLORTMAP_NONE);
622 for (i = 0; i < dev->data->nb_rx_queues; i++) {
623 struct fm10k_rx_queue *rxq;
624 rxq = dev->data->rx_queues[i];
/* Deferred-start queues are left for an explicit queue_start call. */
626 if (rxq->rx_deferred_start)
628 diag = fm10k_dev_rx_queue_start(dev, i);
/* On failure, unwind the Rx queues already started. */
631 for (j = 0; j < i; ++j)
632 rx_queue_clean(dev->data->rx_queues[j]);
637 for (i = 0; i < dev->data->nb_tx_queues; i++) {
638 struct fm10k_tx_queue *txq;
639 txq = dev->data->tx_queues[i];
641 if (txq->tx_deferred_start)
643 diag = fm10k_dev_tx_queue_start(dev, i);
/* Tx start failure also unwinds all started Rx queues. */
646 for (j = 0; j < dev->data->nb_rx_queues; ++j)
647 rx_queue_clean(dev->data->rx_queues[j]);
/* Stop the device by stopping every Tx queue, then every Rx queue. */
656 fm10k_dev_stop(struct rte_eth_dev *dev)
660 PMD_INIT_FUNC_TRACE();
662 for (i = 0; i < dev->data->nb_tx_queues; i++)
663 fm10k_dev_tx_queue_stop(dev, i);
665 for (i = 0; i < dev->data->nb_rx_queues; i++)
666 fm10k_dev_rx_queue_stop(dev, i);
/* Close the device. Shuts the mailbox service down first so no
 * mailbox traffic is processed during teardown. */
670 fm10k_dev_close(struct rte_eth_dev *dev)
672 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
674 PMD_INIT_FUNC_TRACE();
676 /* Stop mailbox service first */
677 fm10k_close_mbx_service(hw);
/* Report link status. The PCIe host interface is always up, so status
 * is hard-coded; speed is left 0 (undefined) as explained below. */
683 fm10k_link_update(struct rte_eth_dev *dev,
684 __rte_unused int wait_to_complete)
686 PMD_INIT_FUNC_TRACE();
688 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
689 * x8 PCIe interface. For now, we leave the speed undefined since there
690 * is no 50Gbps Ethernet. */
691 dev->data->dev_link.link_speed = 0;
692 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
693 dev->data->dev_link.link_status = 1;
/*
 * Fetch device statistics: refresh the HW counters, copy the per-queue
 * counters into the rte_eth_stats arrays, and sum them into the port
 * totals. Only the first min(RTE_ETHDEV_QUEUE_STAT_CNTRS,
 * FM10K_MAX_QUEUES_PF) queues are reported.
 */
699 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
701 uint64_t ipackets, opackets, ibytes, obytes;
702 struct fm10k_hw *hw =
703 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
704 struct fm10k_hw_stats *hw_stats =
705 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
708 PMD_INIT_FUNC_TRACE();
710 fm10k_update_hw_stats(hw, hw_stats);
712 ipackets = opackets = ibytes = obytes = 0;
713 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
714 (i < FM10K_MAX_QUEUES_PF); ++i) {
715 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
716 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
717 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
718 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
719 ipackets += stats->q_ipackets[i];
720 opackets += stats->q_opackets[i];
721 ibytes += stats->q_ibytes[i];
722 obytes += stats->q_obytes[i];
724 stats->ipackets = ipackets;
725 stats->opackets = opackets;
726 stats->ibytes = ibytes;
727 stats->obytes = obytes;
/* Reset statistics: zero the shadow counters and re-bind them to the
 * hardware so subsequent reads count from zero. */
731 fm10k_stats_reset(struct rte_eth_dev *dev)
733 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
734 struct fm10k_hw_stats *hw_stats =
735 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
737 PMD_INIT_FUNC_TRACE();
739 memset(hw_stats, 0, sizeof(*hw_stats));
740 fm10k_rebind_hw_stats(hw, hw_stats);
/* Fill in static device capabilities and the default Rx/Tx queue
 * configuration advertised to applications. */
744 fm10k_dev_infos_get(struct rte_eth_dev *dev,
745 struct rte_eth_dev_info *dev_info)
747 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
749 PMD_INIT_FUNC_TRACE();
751 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
752 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
753 dev_info->max_rx_queues = hw->mac.max_queues;
754 dev_info->max_tx_queues = hw->mac.max_queues;
755 dev_info->max_mac_addrs = 1;
756 dev_info->max_hash_mac_addrs = 0;
757 dev_info->max_vfs = FM10K_MAX_VF_NUM;
758 dev_info->max_vmdq_pools = ETH_64_POOLS;
/* Rx checksum offloads only; no Tx offloads are advertised. */
759 dev_info->rx_offload_capa =
760 DEV_RX_OFFLOAD_IPV4_CKSUM |
761 DEV_RX_OFFLOAD_UDP_CKSUM |
762 DEV_RX_OFFLOAD_TCP_CKSUM;
763 dev_info->tx_offload_capa = 0;
764 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
766 dev_info->default_rxconf = (struct rte_eth_rxconf) {
768 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
769 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
770 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
772 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
776 dev_info->default_txconf = (struct rte_eth_txconf) {
778 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
779 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
780 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
782 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
783 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
784 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
785 ETH_TXQ_FLAGS_NOOFFLOADS,
/* Add/remove a VLAN filter entry. PF only — rejected for VF devices
 * (see the @todo below). */
791 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
793 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
795 PMD_INIT_FUNC_TRACE();
797 /* @todo - add support for the VF */
798 if (hw->mac.type != fm10k_mac_pf)
/* vsi is 0; "on" selects add vs. delete of the VLAN membership. */
801 return fm10k_update_vlan(hw, vlan_id, 0, on);
/* Validate a descriptor count: must satisfy min <= request <= max and
 * be a multiple of mult. Non-zero result means invalid. */
805 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
807 if ((request < min) || (request > max) || ((request % mult) != 0))
814 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
815 * the physical address is required. If the memzone is already created, then
816 * this function returns a pointer to the existing memzone.
818 static inline const struct rte_memzone *
819 allocate_hw_ring(const char *driver_name, const char *ring_name,
820 uint8_t port_id, uint16_t queue_id, int socket_id,
821 uint32_t size, uint32_t align)
823 char name[RTE_MEMZONE_NAMESIZE];
824 const struct rte_memzone *mz;
/* Name is unique per driver/ring-kind/port/queue/socket so re-setup of
 * the same queue finds the previously reserved zone. */
826 snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
827 driver_name, ring_name, port_id, queue_id, socket_id);
829 /* return the memzone if it already exists */
830 mz = rte_memzone_lookup(name);
/* Under Xen dom0 a bounded reservation is required so the ring fits in
 * one machine-contiguous region. */
834 #ifdef RTE_LIBRTE_XEN_DOM0
835 return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
838 return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
/* Validate a threshold: min <= request <= max and request must evenly
 * divide div (note the direction: div % request). Non-zero = invalid. */
843 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
845 if ((request < min) || (request > max) || ((div % request) != 0))
/*
 * Resolve and validate the Rx queue configuration into the queue
 * structure: alloc (free) threshold, drop-on-empty and deferred start.
 * A zero rx_free_thresh selects the driver default.
 */
852 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
854 uint16_t rx_free_thresh;
856 if (conf->rx_free_thresh == 0)
857 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
859 rx_free_thresh = conf->rx_free_thresh;
861 /* make sure the requested threshold satisfies the constraints */
862 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
863 FM10K_RX_FREE_THRESH_MAX(q),
864 FM10K_RX_FREE_THRESH_DIV(q),
866 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
867 "less than or equal to %u, "
868 "greater than or equal to %u, "
869 "and a divisor of %u",
870 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
871 FM10K_RX_FREE_THRESH_MIN(q),
872 FM10K_RX_FREE_THRESH_DIV(q));
/* Validated value becomes the refill (alloc) trigger threshold. */
876 q->alloc_thresh = rx_free_thresh;
877 q->drop_en = conf->rx_drop_en;
878 q->rx_deferred_start = conf->rx_deferred_start;
884 * Hardware requires specific alignment for Rx packet buffers. At
885 * least one of the following two conditions must be satisfied.
886 * 1. Address is 512B aligned
887 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
889 * As such, the driver may need to adjust the DMA address within the
890 * buffer by up to 512B. The mempool element size is checked here
891 * to make sure a maximally sized Ethernet frame can still be wholly
892 * contained within the buffer after 512B alignment.
894 * return 1 if the element size is valid, otherwise return 0.
897 mempool_element_size_valid(struct rte_mempool *mp)
901 /* elt_size includes mbuf header and headroom */
902 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
903 RTE_PKTMBUF_HEADROOM;
905 /* account for up to 512B of alignment */
906 min_size -= FM10K_RX_BUFF_ALIGN;
908 /* sanity check for overflow */
/* Unsigned subtraction above wraps on underflow; a result larger than
 * elt_size signals that wraparound occurred. */
909 if (min_size > mp->elt_size)
/* Remaining room must still hold a max VLAN-tagged Ethernet frame. */
912 if (min_size < ETHER_MAX_VLAN_FRAME_LEN)
/*
 * Rx queue setup: validate mempool element size and descriptor count,
 * release any previous queue at this index, then allocate the queue
 * structure, software ring and hardware descriptor ring memzone.
 */
920 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
921 uint16_t nb_desc, unsigned int socket_id,
922 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
924 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
925 struct fm10k_rx_queue *q;
926 const struct rte_memzone *mz;
928 PMD_INIT_FUNC_TRACE();
930 /* make sure the mempool element size can account for alignment. */
931 if (!mempool_element_size_valid(mp)) {
932 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
936 /* make sure a valid number of descriptors have been requested */
937 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
938 FM10K_MULT_RX_DESC, nb_desc)) {
939 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
940 "less than or equal to %"PRIu32", "
941 "greater than or equal to %u, "
942 "and a multiple of %u",
943 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
949 * if this queue existed already, free the associated memory. The
950 * queue cannot be reused in case we need to allocate memory on
951 * different socket than was previously used.
953 if (dev->data->rx_queues[queue_id] != NULL) {
954 rx_queue_free(dev->data->rx_queues[queue_id]);
955 dev->data->rx_queues[queue_id] = NULL;
958 /* allocate memory for the queue structure */
959 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
962 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
968 q->nb_desc = nb_desc;
969 q->port_id = dev->data->port_id;
970 q->queue_id = queue_id;
/* Tail pointer maps directly onto the queue's RDT register. */
971 q->tail_ptr = (volatile uint32_t *)
972 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
973 if (handle_rxconf(q, conf))
976 /* allocate memory for the software ring */
977 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
978 nb_desc * sizeof(struct rte_mbuf *),
979 RTE_CACHE_LINE_SIZE, socket_id);
980 if (q->sw_ring == NULL) {
981 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
987 * allocate memory for the hardware descriptor ring. A memzone large
988 * enough to hold the maximum ring size is requested to allow for
989 * resizing in later calls to the queue setup function.
991 mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
992 dev->data->port_id, queue_id, socket_id,
993 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
995 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
996 rte_free(q->sw_ring);
/* Keep the physical address; rx_init programs it into RDBAL/RDBAH. */
1000 q->hw_ring = mz->addr;
1001 q->hw_ring_phys_addr = mz->phys_addr;
1003 dev->data->rx_queues[queue_id] = q;
/* ethdev release hook: thin wrapper over rx_queue_free(). */
1008 fm10k_rx_queue_release(void *queue)
1010 PMD_INIT_FUNC_TRACE();
1012 rx_queue_free(queue);
/*
 * Resolve and validate the Tx queue configuration into the queue
 * structure: free threshold, RS threshold and deferred start. Zero
 * values select driver defaults.
 */
1016 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1018 uint16_t tx_free_thresh;
1019 uint16_t tx_rs_thresh;
1021 /* constraint MACROs require that tx_free_thresh is configured
1022 * before tx_rs_thresh */
1023 if (conf->tx_free_thresh == 0)
1024 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1026 tx_free_thresh = conf->tx_free_thresh;
1028 /* make sure the requested threshold satisfies the constraints */
1029 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1030 FM10K_TX_FREE_THRESH_MAX(q),
1031 FM10K_TX_FREE_THRESH_DIV(q),
1033 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1034 "less than or equal to %u, "
1035 "greater than or equal to %u, "
1036 "and a divisor of %u",
1037 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1038 FM10K_TX_FREE_THRESH_MIN(q),
1039 FM10K_TX_FREE_THRESH_DIV(q));
/* free_thresh must be stored before the RS constraint macros run. */
1043 q->free_thresh = tx_free_thresh;
1045 if (conf->tx_rs_thresh == 0)
1046 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1048 tx_rs_thresh = conf->tx_rs_thresh;
1050 q->tx_deferred_start = conf->tx_deferred_start;
1052 /* make sure the requested threshold satisfies the constraints */
1053 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1054 FM10K_TX_RS_THRESH_MAX(q),
1055 FM10K_TX_RS_THRESH_DIV(q),
1057 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1058 "less than or equal to %u, "
1059 "greater than or equal to %u, "
1060 "and a divisor of %u",
1061 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1062 FM10K_TX_RS_THRESH_MIN(q),
1063 FM10K_TX_RS_THRESH_DIV(q));
1067 q->rs_thresh = tx_rs_thresh;
/*
 * Tx queue setup: validate descriptor count, release any previous
 * queue at this index, then allocate the queue structure, software
 * ring, hardware ring memzone and the RS-bit tracker list.
 */
1073 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1074 uint16_t nb_desc, unsigned int socket_id,
1075 const struct rte_eth_txconf *conf)
1077 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1078 struct fm10k_tx_queue *q;
1079 const struct rte_memzone *mz;
1081 PMD_INIT_FUNC_TRACE();
1083 /* make sure a valid number of descriptors have been requested */
1084 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1085 FM10K_MULT_TX_DESC, nb_desc)) {
1086 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1087 "less than or equal to %"PRIu32", "
1088 "greater than or equal to %u, "
1089 "and a multiple of %u",
1090 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1091 FM10K_MULT_TX_DESC);
1096 * if this queue existed already, free the associated memory. The
1097 * queue cannot be reused in case we need to allocate memory on
1098 * different socket than was previously used.
1100 if (dev->data->tx_queues[queue_id] != NULL) {
1101 tx_queue_free(dev->data->tx_queues[queue_id]);
1102 dev->data->tx_queues[queue_id] = NULL;
1105 /* allocate memory for the queue structure */
1106 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1109 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1114 q->nb_desc = nb_desc;
1115 q->port_id = dev->data->port_id;
1116 q->queue_id = queue_id;
/* Tail pointer maps directly onto the queue's TDT register. */
1117 q->tail_ptr = (volatile uint32_t *)
1118 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1119 if (handle_txconf(q, conf))
1122 /* allocate memory for the software ring */
1123 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1124 nb_desc * sizeof(struct rte_mbuf *),
1125 RTE_CACHE_LINE_SIZE, socket_id);
1126 if (q->sw_ring == NULL) {
1127 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1133 * allocate memory for the hardware descriptor ring. A memzone large
1134 * enough to hold the maximum ring size is requested to allow for
1135 * resizing in later calls to the queue setup function.
1137 mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1138 dev->data->port_id, queue_id, socket_id,
1139 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
1141 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1142 rte_free(q->sw_ring);
/* Keep the physical address; tx_init programs it into TDBAL/TDBAH. */
1146 q->hw_ring = mz->addr;
1147 q->hw_ring_phys_addr = mz->phys_addr;
1150 * allocate memory for the RS bit tracker. Enough slots to hold the
1151 * descriptor index for each RS bit needing to be set are required.
1153 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1154 ((nb_desc + 1) / q->rs_thresh) *
1156 RTE_CACHE_LINE_SIZE, socket_id);
1157 if (q->rs_tracker.list == NULL) {
1158 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1159 rte_free(q->sw_ring);
1164 dev->data->tx_queues[queue_id] = q;
/* ethdev release hook: thin wrapper over tx_queue_free(). */
1169 fm10k_tx_queue_release(void *queue)
1171 PMD_INIT_FUNC_TRACE();
1173 tx_queue_free(queue);
/*
 * Update the RSS redirection table from reta_conf. Entries are packed
 * four 8-bit values per 32-bit RETA register; when only some of the
 * four are selected by the mask, the register is read first so the
 * unselected entries are preserved (read-modify-write).
 */
1177 fm10k_reta_update(struct rte_eth_dev *dev,
1178 struct rte_eth_rss_reta_entry64 *reta_conf,
1181 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1182 uint16_t i, j, idx, shift;
1186 PMD_INIT_FUNC_TRACE();
1188 if (reta_size > FM10K_MAX_RSS_INDICES) {
1189 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1190 "(%d) doesn't match the number hardware can supported "
1191 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1196 * Update Redirection Table RETA[n], n=0..31. The redirection table has
1197 * 128-entries in 32 registers
1199 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1200 idx = i / RTE_RETA_GROUP_SIZE;
1201 shift = i % RTE_RETA_GROUP_SIZE;
1202 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1203 BIT_MASK_PER_UINT32);
/* Partial group: read the register to keep unmasked entries. */
1208 if (mask != BIT_MASK_PER_UINT32)
1209 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1211 for (j = 0; j < CHARS_PER_UINT32; j++) {
1212 if (mask & (0x1 << j)) {
/* Clear byte j, then splice in the new entry at that byte. */
1214 reta &= ~(UINT8_MAX << CHAR_BIT * j);
1215 reta |= reta_conf[idx].reta[shift + j] <<
1219 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
/*
 * Read the RSS redirection table into reta_conf. The caller must
 * provide a table at least FM10K_MAX_RSS_INDICES entries long; masked
 * entries are extracted byte-by-byte from each 32-bit RETA register.
 */
1226 fm10k_reta_query(struct rte_eth_dev *dev,
1227 struct rte_eth_rss_reta_entry64 *reta_conf,
1230 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1231 uint16_t i, j, idx, shift;
1235 PMD_INIT_FUNC_TRACE();
1237 if (reta_size < FM10K_MAX_RSS_INDICES) {
1238 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1239 "(%d) doesn't match the number hardware can supported "
1240 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1245 * Read Redirection Table RETA[n], n=0..31. The redirection table has
1246 * 128-entries in 32 registers
1248 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1249 idx = i / RTE_RETA_GROUP_SIZE;
1250 shift = i % RTE_RETA_GROUP_SIZE;
1251 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1252 BIT_MASK_PER_UINT32);
1256 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1257 for (j = 0; j < CHARS_PER_UINT32; j++) {
1258 if (mask & (0x1 << j))
/* Extract byte j of the register as entry i+j. */
1259 reta_conf[idx].reta[shift + j] = ((reta >>
1260 CHAR_BIT * j) & UINT8_MAX);
/*
 * Program the RSS hash key (RSSRK registers) and enabled hash-function
 * set (MRQC register) from rss_conf.
 * NOTE(review): listing is non-contiguous — the `mrqc == 0` rejection
 * implied by the comment at original line 1297, plus returns/braces,
 * are not shown here.
 */
1268 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1269 struct rte_eth_rss_conf *rss_conf)
1271 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Key is consumed as 32-bit words when writing the RSSRK registers. */
1272 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1274 uint64_t hf = rss_conf->rss_hf;
1277 PMD_INIT_FUNC_TRACE();
/* Key must cover all RSSRK registers (size * entries-per-register bytes). */
1279 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1280 FM10K_RSSRK_ENTRIES_PER_REG)
/*
 * Translate generic ETH_RSS_* flags into FM10K MRQC bits. Note the EX
 * variants map onto the same hardware bit as their base flag, so they
 * cannot be enabled independently on this device.
 */
1287 mrqc |= (hf & ETH_RSS_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
1288 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
1289 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
1290 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
1291 mrqc |= (hf & ETH_RSS_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
1292 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
1293 mrqc |= (hf & ETH_RSS_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
1294 mrqc |= (hf & ETH_RSS_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
1295 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
1297 /* If the mapping doesn't fit any supported, return */
/* Write the hash key one 32-bit register at a time, then commit MRQC. */
1302 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1303 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1305 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
/*
 * Inverse of fm10k_rss_hash_update: read the RSS key back from RSSRK
 * and reconstruct the ETH_RSS_* flag set from the MRQC register.
 * NOTE(review): non-contiguous listing; declarations of `mrqc`/`hf`/`i`
 * and the return path are not shown.
 */
1311 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1312 struct rte_eth_rss_conf *rss_conf)
1314 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1315 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1320 PMD_INIT_FUNC_TRACE();
/* Caller's buffer must hold the full hardware key before we copy out. */
1322 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1323 FM10K_RSSRK_ENTRIES_PER_REG)
1327 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1328 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1330 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
/*
 * Each MRQC bit expands to BOTH the base flag and its _EX variant,
 * mirroring the collapsed mapping used in fm10k_rss_hash_update.
 */
1332 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_IPV4_TCP : 0;
1333 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
1334 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
1335 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
1336 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP : 0;
1337 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
1338 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_IPV4_UDP : 0;
1339 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP : 0;
1340 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
1342 rss_conf->rss_hf = hf;
1347 /* Mailbox message handler in VF */
/*
 * Dispatch table for mailbox messages received by a VF: test messages,
 * MAC/VLAN updates and logical-port state changes from the PF, with the
 * generic TLV error handler last (closing `};` is outside this view).
 */
1348 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
1349 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1350 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
1351 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
1352 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1355 /* Mailbox message handler in PF */
/*
 * Dispatch table for mailbox messages received by the PF from the
 * Switch Manager: error replies for xcast/MAC-rule/lport requests,
 * lport-map and PVID updates, with the generic TLV error handler last
 * (closing `};` is outside this view).
 */
1356 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
1357 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1358 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1359 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1360 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1361 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1362 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1363 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
/*
 * Initialize the mailbox lock, install the PF or VF message-handler
 * table, and open the mailbox connection. Returns the connect() result
 * on success; error paths are partly outside this view (non-contiguous
 * listing — `err` declaration and braces not shown).
 */
1367 fm10k_setup_mbx_service(struct fm10k_hw *hw)
1371 /* Initialize mailbox lock */
1372 fm10k_mbx_initlock(hw);
1374 /* Replace default message handler with new ones */
1375 if (hw->mac.type == fm10k_mac_pf)
1376 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
1378 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
1381 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
1385 /* Connect to SM for PF device or PF for VF device */
1386 return hw->mbx.ops.connect(hw, &hw->mbx);
/* Tear down the mailbox link established by fm10k_setup_mbx_service(). */
1390 fm10k_close_mbx_service(struct fm10k_hw *hw)
1392 /* Disconnect from SM for PF device or PF for VF device */
1393 hw->mbx.ops.disconnect(hw, &hw->mbx);
/*
 * ethdev operation table wiring the generic rte_ethdev API to the
 * fm10k implementations above (closing `};` is outside this view).
 * NOTE(review): could be declared `const` — verify no runtime mutation
 * elsewhere in the file before changing.
 */
1396 static struct eth_dev_ops fm10k_eth_dev_ops = {
1397 .dev_configure = fm10k_dev_configure,
1398 .dev_start = fm10k_dev_start,
1399 .dev_stop = fm10k_dev_stop,
1400 .dev_close = fm10k_dev_close,
1401 .stats_get = fm10k_stats_get,
1402 .stats_reset = fm10k_stats_reset,
1403 .link_update = fm10k_link_update,
1404 .dev_infos_get = fm10k_dev_infos_get,
1405 .vlan_filter_set = fm10k_vlan_filter_set,
1406 .rx_queue_start = fm10k_dev_rx_queue_start,
1407 .rx_queue_stop = fm10k_dev_rx_queue_stop,
1408 .tx_queue_start = fm10k_dev_tx_queue_start,
1409 .tx_queue_stop = fm10k_dev_tx_queue_stop,
1410 .rx_queue_setup = fm10k_rx_queue_setup,
1411 .rx_queue_release = fm10k_rx_queue_release,
1412 .tx_queue_setup = fm10k_tx_queue_setup,
1413 .tx_queue_release = fm10k_tx_queue_release,
1414 .reta_update = fm10k_reta_update,
1415 .reta_query = fm10k_reta_query,
1416 .rss_hash_update = fm10k_rss_hash_update,
1417 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
/*
 * Per-device init callback invoked by the EAL PCI probe path: install
 * ops/burst functions, then (primary process only) initialize the
 * shared code, MAC address, mailbox, and default port state.
 * NOTE(review): non-contiguous listing — return statements, error
 * cleanup, `diag` declaration and the fm10k_mbx_lock() call implied by
 * the comment at original line 1528 are not shown here.
 */
1421 eth_fm10k_dev_init(__rte_unused struct eth_driver *eth_drv,
1422 struct rte_eth_dev *dev)
1424 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1427 PMD_INIT_FUNC_TRACE();
1429 dev->dev_ops = &fm10k_eth_dev_ops;
1430 dev->rx_pkt_burst = &fm10k_recv_pkts;
1431 dev->tx_pkt_burst = &fm10k_xmit_pkts;
/* Secondary processes re-attach: pick the burst fn matching primary's setup. */
1433 if (dev->data->scattered_rx)
1434 dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
1436 /* only initialize in the primary process */
1437 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1440 /* Vendor and Device ID need to be set before init of shared code */
1441 memset(hw, 0, sizeof(*hw));
1442 hw->device_id = dev->pci_dev->id.device_id;
1443 hw->vendor_id = dev->pci_dev->id.vendor_id;
1444 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
1445 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
1446 hw->revision_id = 0;
/* BAR 0 holds the register space; NULL means the device wasn't mapped. */
1447 hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
1448 if (hw->hw_addr == NULL) {
1449 PMD_INIT_LOG(ERR, "Bad mem resource."
1450 " Try to blacklist unused devices.");
1454 /* Store fm10k_adapter pointer */
1455 hw->back = dev->data->dev_private;
1457 /* Initialize the shared code */
1458 diag = fm10k_init_shared_code(hw);
1459 if (diag != FM10K_SUCCESS) {
1460 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1465 * Inialize bus info. Normally we would call fm10k_get_bus_info(), but
1466 * there is no way to get link status without reading BAR4. Until this
1467 * works, assume we have maximum bandwidth.
1468 * @todo - fix bus info
1470 hw->bus_caps.speed = fm10k_bus_speed_8000;
1471 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
1472 hw->bus_caps.payload = fm10k_bus_payload_512;
1473 hw->bus.speed = fm10k_bus_speed_8000;
1474 hw->bus.width = fm10k_bus_width_pcie_x8;
1475 hw->bus.payload = fm10k_bus_payload_256;
1477 /* Initialize the hw */
1478 diag = fm10k_init_hw(hw);
1479 if (diag != FM10K_SUCCESS) {
1480 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1484 /* Initialize MAC address(es) */
/* Space for exactly one MAC address; not freed on later error paths
 * visible here — NOTE(review): confirm cleanup in the (unseen) error
 * handling to rule out a leak. */
1485 dev->data->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
1486 if (dev->data->mac_addrs == NULL) {
1487 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
1491 diag = fm10k_read_mac_addr(hw);
1492 if (diag != FM10K_SUCCESS) {
1494 * TODO: remove special handling on VF. Need shared code to
1497 if (hw->mac.type == fm10k_mac_pf) {
1498 PMD_INIT_LOG(ERR, "Read MAC addr failed: %d", diag);
/* VF without an assigned MAC: fall back to a random locally-administered one. */
1501 /* Generate a random addr */
1502 eth_random_addr(hw->mac.addr);
1503 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
1507 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
1508 &dev->data->mac_addrs[0]);
1510 /* Reset the hw statistics */
1511 fm10k_stats_reset(dev);
1514 diag = fm10k_reset_hw(hw);
1515 if (diag != FM10K_SUCCESS) {
1516 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
1520 /* Setup mailbox service */
1521 diag = fm10k_setup_mbx_service(hw);
1522 if (diag != FM10K_SUCCESS) {
1523 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
1528 * Below function will trigger operations on mailbox, acquire lock to
1529 * avoid race condition from interrupt handler. Operations on mailbox
1530 * FIFO will trigger interrupt to PF/SM, in which interrupt handler
1531 * will handle and generate an interrupt to our side. Then, FIFO in
1532 * mailbox will be touched.
1535 /* Enable port first */
1536 hw->mac.ops.update_lport_state(hw, 0, 0, 1);
1538 /* Update default vlan */
1539 hw->mac.ops.update_vlan(hw, hw->mac.default_vid, 0, true);
1542 * Add default mac/vlan filter. glort is assigned by SM for PF, while is
1543 * unused for VF. PF will assign correct glort for VF.
1545 hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
1546 hw->mac.default_vid, 1, 0);
/*
 * NOTE(review): the comment below says "unicast" but the code passes
 * FM10K_XCAST_MODE_MULTI — one of the two is wrong; confirm intended
 * default xcast mode and fix comment or constant accordingly.
 */
1548 /* Set unicast mode by default. App can change to other mode in other
1551 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1552 FM10K_XCAST_MODE_MULTI);
1554 fm10k_mbx_unlock(hw);
1560 * The set of PCI devices this driver supports. This driver will enable both PF
1561 * and SRIOV-VF devices.
/*
 * The ID list is generated by defining the DECL macros and re-including
 * rte_pci_dev_ids.h, which expands one RTE_PCI_DEVICE() entry per
 * supported FM10K PF/VF device; a zeroed sentinel terminates the array.
 */
1563 static struct rte_pci_id pci_id_fm10k_map[] = {
1564 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
1565 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
1566 #include "rte_pci_dev_ids.h"
1567 { .vendor_id = 0, /* sentinel */ },
/*
 * ethdev driver descriptor: binds the PCI ID table to the per-device
 * init callback and sizes the private data area. NEED_MAPPING requests
 * BAR mapping before eth_fm10k_dev_init runs (it reads mem_resource[0]).
 */
1570 static struct eth_driver rte_pmd_fm10k = {
1572 .name = "rte_pmd_fm10k",
1573 .id_table = pci_id_fm10k_map,
1574 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1576 .eth_dev_init = eth_fm10k_dev_init,
1577 .dev_private_size = sizeof(struct fm10k_adapter),
1581 * Driver initialization routine.
1582 * Invoked once at EAL init time.
1583 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
/* Both parameters are unused; registration takes no configuration. */
1586 rte_pmd_fm10k_init(__rte_unused const char *name,
1587 __rte_unused const char *params)
1589 PMD_INIT_FUNC_TRACE();
1590 rte_eth_driver_register(&rte_pmd_fm10k);
/*
 * EAL driver hook: PMD_REGISTER_DRIVER arranges for .init
 * (rte_pmd_fm10k_init) to run during rte_eal_init().
 */
1594 static struct rte_driver rte_fm10k_driver = {
1596 .init = rte_pmd_fm10k_init,
1599 PMD_REGISTER_DRIVER(rte_fm10k_driver);