4 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_memzone.h>
37 #include <rte_string_fns.h>
39 #include <rte_spinlock.h>
42 #include "base/fm10k_api.h"
44 /* Default delay to acquire mailbox lock */
45 #define FM10K_MBXLOCK_DELAY_US 20
46 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
48 /* Max number of attempts to query switch status */
49 #define MAX_QUERY_SWITCH_STATE_TIMES 10
50 /* Wait interval to get switch status */
51 #define WAIT_SWITCH_MSG_US 100000
52 /* Number of chars per uint32 type */
53 #define CHARS_PER_UINT32 (sizeof(uint32_t))
54 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
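/*
 * Worked expansion of the two macros above (editor's sketch, not driver
 * logic): CHARS_PER_UINT32 is sizeof(uint32_t) == 4, so BIT_MASK_PER_UINT32
 * is (1 << 4) - 1 == 0xF -- a 4-bit mask with one bit per byte-sized RETA
 * entry packed into a 32-bit register; see fm10k_reta_update() below.
 */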
56 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
57 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
58 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
59 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
60 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
61 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
63 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
65 fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add);
67 fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev);
68 static void fm10k_tx_queue_release(void *queue);
69 static void fm10k_rx_queue_release(void *queue);
72 fm10k_mbx_initlock(struct fm10k_hw *hw)
74 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
78 fm10k_mbx_lock(struct fm10k_hw *hw)
80 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
81 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
85 fm10k_mbx_unlock(struct fm10k_hw *hw)
87 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
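/*
 * Illustrative use of the helpers above (a sketch; the real call sites
 * appear throughout this file): every mailbox operation is bracketed by
 * fm10k_mbx_lock()/fm10k_mbx_unlock() so the control path and the
 * interrupt handler never touch the mailbox FIFO concurrently.
 *
 *	fm10k_mbx_lock(hw);
 *	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
 *			FM10K_XCAST_MODE_NONE);
 *	fm10k_mbx_unlock(hw);
 */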
91 * reset queue to initial state, allocate software buffers used when starting
94 * return -ENOMEM if buffers cannot be allocated
95 * return -EINVAL if buffers do not satisfy alignment condition
98 rx_queue_reset(struct fm10k_rx_queue *q)
102 PMD_INIT_FUNC_TRACE();
104 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
108 for (i = 0; i < q->nb_desc; ++i) {
109 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
110 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
111 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
115 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
116 q->hw_ring[i].q.pkt_addr = dma_addr;
117 q->hw_ring[i].q.hdr_addr = dma_addr;
122 q->next_trigger = q->alloc_thresh - 1;
123 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
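/*
 * Numeric sketch of the two writes above, with hypothetical nb_desc == 128
 * and alloc_thresh == 32: next_trigger starts at 31, so a buffer re-arm is
 * triggered after every 32 received packets, and the tail pointer starts at
 * 127, handing all 128 freshly filled descriptors to the hardware.
 */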
128 * clean queue, descriptor rings, free software buffers used when stopping
132 rx_queue_clean(struct fm10k_rx_queue *q)
134 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
136 PMD_INIT_FUNC_TRACE();
138 /* zero descriptor rings */
139 for (i = 0; i < q->nb_desc; ++i)
140 q->hw_ring[i] = zero;
142 /* free software buffers */
143 for (i = 0; i < q->nb_desc; ++i) {
145 rte_pktmbuf_free_seg(q->sw_ring[i]);
146 q->sw_ring[i] = NULL;
152 * free all queue memory used when releasing the queue (i.e. on re-configure)
155 rx_queue_free(struct fm10k_rx_queue *q)
157 PMD_INIT_FUNC_TRACE();
159 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
162 rte_free(q->sw_ring);
171 * disable RX queue, wait until HW finishes the necessary flush operation
174 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
178 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
179 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
180 reg & ~FM10K_RXQCTL_ENABLE);
182 /* Wait 100us at most */
183 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
185 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
186 if (!(reg & FM10K_RXQCTL_ENABLE))
190 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
197 * reset queue to initial state, allocate software buffers used when starting
201 tx_queue_reset(struct fm10k_tx_queue *q)
203 PMD_INIT_FUNC_TRACE();
207 q->nb_free = q->nb_desc - 1;
208 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
209 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
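/*
 * Sizing sketch for the fifo_reset() call above, with hypothetical
 * nb_desc == 512 and rs_thresh == 32: the RS-bit tracker is reset to
 * (512 + 1) / 32 == 16 slots, one slot per descriptor index that may carry
 * an RS bit between two consecutive write-backs.
 */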
213 * clean queue, descriptor rings, free software buffers used when stopping
217 tx_queue_clean(struct fm10k_tx_queue *q)
219 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
221 PMD_INIT_FUNC_TRACE();
223 /* zero descriptor rings */
224 for (i = 0; i < q->nb_desc; ++i)
225 q->hw_ring[i] = zero;
227 /* free software buffers */
228 for (i = 0; i < q->nb_desc; ++i) {
230 rte_pktmbuf_free_seg(q->sw_ring[i]);
231 q->sw_ring[i] = NULL;
237 * free all queue memory used when releasing the queue (i.e. on re-configure)
240 tx_queue_free(struct fm10k_tx_queue *q)
242 PMD_INIT_FUNC_TRACE();
244 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
246 if (q->rs_tracker.list) {
247 rte_free(q->rs_tracker.list);
248 q->rs_tracker.list = NULL;
251 rte_free(q->sw_ring);
260 * disable TX queue, wait until HW finishes the necessary flush operation
263 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
267 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
268 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
269 reg & ~FM10K_TXDCTL_ENABLE);
271 /* Wait 100us at most */
272 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
274 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
275 if (!(reg & FM10K_TXDCTL_ENABLE))
279 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
286 fm10k_dev_configure(struct rte_eth_dev *dev)
288 PMD_INIT_FUNC_TRACE();
290 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
291 PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
297 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
299 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
300 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
301 uint32_t mrqc, *key, i, reta, j;
304 #define RSS_KEY_SIZE 40
305 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
306 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
307 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
308 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
309 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
310 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
313 if (dev->data->nb_rx_queues == 1 ||
314 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
315 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
318 /* RSS key is rss_intel_key (default) or user-provided (rss_key) */
319 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
320 key = (uint32_t *)rss_intel_key;
322 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
324 /* Now fill our hash function seeds, 4 bytes at a time */
325 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
326 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
329 * Fill in redirection table
330 * The byte-swap is needed because NIC registers are in
331 * little-endian order.
334 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
335 if (j == dev->data->nb_rx_queues)
337 reta = (reta << CHAR_BIT) | j;
339 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
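/*
 * Worked example of the packing loop above (editor's sketch, assuming the
 * elided register write byte-swaps `reta` as the little-endian note above
 * implies): with nb_rx_queues == 3 the queue ids cycle 0,1,2,0,... and each
 * group of four entries accumulates as
 *
 *	reta = (q0 << 24) | (q1 << 16) | (q2 << 8) | q3;
 *
 * so that entry 0 lands in the least-significant byte of the register
 * after the swap, matching the hardware's entry order.
 */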
344 * Generate RSS hash based on packet types, TCP/UDP
345 * port numbers and/or IPv4/v6 src and dst addresses
347 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
349 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
350 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
351 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
352 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
353 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
354 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
355 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
356 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
357 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
360 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
365 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
369 fm10k_dev_tx_init(struct rte_eth_dev *dev)
371 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
373 struct fm10k_tx_queue *txq;
377 /* Disable TXINT to avoid possible interrupt */
378 for (i = 0; i < hw->mac.max_queues; i++)
379 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
380 3 << FM10K_TXINT_TIMER_SHIFT);
383 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
384 txq = dev->data->tx_queues[i];
385 base_addr = txq->hw_ring_phys_addr;
386 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
388 /* disable queue to avoid issues while updating state */
389 ret = tx_queue_disable(hw, i);
391 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
395 /* set location and size for descriptor ring */
396 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
397 base_addr & UINT64_LOWER_32BITS_MASK);
398 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
399 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
400 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
406 fm10k_dev_rx_init(struct rte_eth_dev *dev)
408 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
410 struct fm10k_rx_queue *rxq;
413 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
416 /* Disable RXINT to avoid possible interrupt */
417 for (i = 0; i < hw->mac.max_queues; i++)
418 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
419 3 << FM10K_RXINT_TIMER_SHIFT);
421 /* Setup RX queues */
422 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
423 rxq = dev->data->rx_queues[i];
424 base_addr = rxq->hw_ring_phys_addr;
425 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
427 /* disable queue to avoid issues while updating state */
428 ret = rx_queue_disable(hw, i);
430 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
434 /* Setup the Base and Length of the Rx Descriptor Ring */
435 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
436 base_addr & UINT64_LOWER_32BITS_MASK);
437 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
438 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
439 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
441 /* Configure the Rx buffer size for one buff without split */
442 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
443 RTE_PKTMBUF_HEADROOM);
444 /* As RX buffer is aligned to 512B within mbuf, some bytes are
445 * reserved for this purpose, and the worst case could be 511B.
446 * But SRR reg assumes all buffers have the same size. In order
447 * to fill the gap, we'll have to consider the worst case and
448 * assume 512B is reserved. If we don't do so, it's possible
449 * for HW to overwrite data in the next mbuf.
451 buf_size -= FM10K_RX_DATABUF_ALIGN;
453 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
454 buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
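/*
 * Worked example for the sizing above, assuming a common 2176 B mbuf data
 * room (e.g. RTE_MBUF_DEFAULT_BUF_SIZE; hypothetical here) and 128 B of
 * RTE_PKTMBUF_HEADROOM: buf_size = 2176 - 128 = 2048 B, and the 512 B
 * worst-case alignment reserve leaves 1536 B, so the hardware is told each
 * buffer holds 1536 B and can never write past a worst-case-aligned mbuf.
 */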
456 /* Add dual VLAN tag length to support dual VLAN */
457 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
458 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
459 dev->data->dev_conf.rxmode.enable_scatter) {
461 dev->data->scattered_rx = 1;
462 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
463 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
464 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
465 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
468 /* Enable drop on empty; this bit is read-only for the VF */
469 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
470 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
472 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
473 FM10K_WRITE_FLUSH(hw);
476 /* Configure RSS if applicable */
477 fm10k_dev_mq_rx_configure(dev);
482 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
484 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
487 struct fm10k_rx_queue *rxq;
489 PMD_INIT_FUNC_TRACE();
491 if (rx_queue_id < dev->data->nb_rx_queues) {
492 rxq = dev->data->rx_queues[rx_queue_id];
493 err = rx_queue_reset(rxq);
494 if (err == -ENOMEM) {
495 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
497 } else if (err == -EINVAL) {
498 PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"
503 /* Setup the HW Rx Head and Tail Descriptor Pointers
504 * Note: this must be done AFTER the queue is enabled on real
505 * hardware, but BEFORE the queue is enabled when using the
506 * emulation platform. Do it in both places for now and remove
507 * this comment and the following two register writes when the
508 * emulation platform is no longer being used.
510 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
511 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
513 /* Set PF ownership flag for PF devices */
514 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
515 if (hw->mac.type == fm10k_mac_pf)
516 reg |= FM10K_RXQCTL_PF;
517 reg |= FM10K_RXQCTL_ENABLE;
518 /* enable RX queue */
519 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
520 FM10K_WRITE_FLUSH(hw);
522 /* Setup the HW Rx Head and Tail Descriptor Pointers
523 * Note: this must be done AFTER the queue is enabled
525 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
526 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
533 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
535 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
537 PMD_INIT_FUNC_TRACE();
539 if (rx_queue_id < dev->data->nb_rx_queues) {
540 /* Disable RX queue */
541 rx_queue_disable(hw, rx_queue_id);
543 /* Free mbuf and clean HW ring */
544 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
551 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
553 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
554 /** @todo - this should be defined in the shared code */
555 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
556 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
559 PMD_INIT_FUNC_TRACE();
561 if (tx_queue_id < dev->data->nb_tx_queues) {
562 tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
564 /* reset head and tail pointers */
565 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
566 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
568 /* enable TX queue */
569 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
570 FM10K_TXDCTL_ENABLE | txdctl);
571 FM10K_WRITE_FLUSH(hw);
579 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
581 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
583 PMD_INIT_FUNC_TRACE();
585 if (tx_queue_id < dev->data->nb_tx_queues) {
586 tx_queue_disable(hw, tx_queue_id);
587 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
593 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
595 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
596 != FM10K_DGLORTMAP_NONE);
600 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
602 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
605 PMD_INIT_FUNC_TRACE();
607 /* Return if a valid glort range has not been acquired */
608 if (!fm10k_glort_valid(hw))
612 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
613 FM10K_XCAST_MODE_PROMISC);
614 fm10k_mbx_unlock(hw);
616 if (status != FM10K_SUCCESS)
617 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
621 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
623 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
627 PMD_INIT_FUNC_TRACE();
629 /* Return if a valid glort range has not been acquired */
630 if (!fm10k_glort_valid(hw))
633 if (dev->data->all_multicast == 1)
634 mode = FM10K_XCAST_MODE_ALLMULTI;
636 mode = FM10K_XCAST_MODE_NONE;
639 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
641 fm10k_mbx_unlock(hw);
643 if (status != FM10K_SUCCESS)
644 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
648 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
650 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
653 PMD_INIT_FUNC_TRACE();
655 /* Return if a valid glort range has not been acquired */
656 if (!fm10k_glort_valid(hw))
659 /* If promiscuous mode is enabled, it doesn't make sense to enable
660 * allmulticast and disable promiscuous since fm10k can only select
663 if (dev->data->promiscuous) {
664 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
665 "no need to enable allmulticast");
670 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
671 FM10K_XCAST_MODE_ALLMULTI);
672 fm10k_mbx_unlock(hw);
674 if (status != FM10K_SUCCESS)
675 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
679 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
681 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
684 PMD_INIT_FUNC_TRACE();
686 /* Return if a valid glort range has not been acquired */
687 if (!fm10k_glort_valid(hw))
690 if (dev->data->promiscuous) {
691 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
692 "since promisc mode is enabled");
697 /* Change mode to unicast mode */
698 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
699 FM10K_XCAST_MODE_NONE);
700 fm10k_mbx_unlock(hw);
702 if (status != FM10K_SUCCESS)
703 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
706 /* fls = find last set bit = 32 minus the number of leading zeros */
708 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
710 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
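/*
 * Sanity examples for the helpers above (editor's sketch): fls(0) == 0,
 * fls(1) == 1, fls(15) == 4 and fls(16) == 5, so the
 * fls(dev->data->nb_rx_queues - 1) expression below yields the number of
 * RSS field bits needed to address the queues (e.g. 4 bits for 16 queues).
 * BSIZEPKT_ROUNDUP is the usual round-up mask for sizes expressed in
 * (1 << FM10K_SRRCTL_BSIZEPKT_SHIFT)-byte units.
 */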
712 fm10k_dev_start(struct rte_eth_dev *dev)
714 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
717 PMD_INIT_FUNC_TRACE();
719 /* stop, init, then start the hw */
720 diag = fm10k_stop_hw(hw);
721 if (diag != FM10K_SUCCESS) {
722 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
726 diag = fm10k_init_hw(hw);
727 if (diag != FM10K_SUCCESS) {
728 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
732 diag = fm10k_start_hw(hw);
733 if (diag != FM10K_SUCCESS) {
734 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
738 diag = fm10k_dev_tx_init(dev);
740 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
744 diag = fm10k_dev_rx_init(dev);
746 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
750 if (hw->mac.type == fm10k_mac_pf) {
751 /* Establish only VSI 0 as valid */
752 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
754 /* Configure RSS bits used in RETA table */
755 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
756 fls(dev->data->nb_rx_queues - 1) <<
757 FM10K_DGLORTDEC_RSSLENGTH_SHIFT);
759 /* Invalidate all other GLORT entries */
760 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
761 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
762 FM10K_DGLORTMAP_NONE);
765 for (i = 0; i < dev->data->nb_rx_queues; i++) {
766 struct fm10k_rx_queue *rxq;
767 rxq = dev->data->rx_queues[i];
769 if (rxq->rx_deferred_start)
771 diag = fm10k_dev_rx_queue_start(dev, i);
774 for (j = 0; j < i; ++j)
775 rx_queue_clean(dev->data->rx_queues[j]);
780 for (i = 0; i < dev->data->nb_tx_queues; i++) {
781 struct fm10k_tx_queue *txq;
782 txq = dev->data->tx_queues[i];
784 if (txq->tx_deferred_start)
786 diag = fm10k_dev_tx_queue_start(dev, i);
789 for (j = 0; j < dev->data->nb_rx_queues; ++j)
790 rx_queue_clean(dev->data->rx_queues[j]);
795 /* Update default vlan */
796 if (hw->mac.default_vid && hw->mac.default_vid <= ETHER_MAX_VLAN_ID)
797 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
803 fm10k_dev_stop(struct rte_eth_dev *dev)
807 PMD_INIT_FUNC_TRACE();
809 if (dev->data->tx_queues)
810 for (i = 0; i < dev->data->nb_tx_queues; i++)
811 fm10k_dev_tx_queue_stop(dev, i);
813 if (dev->data->rx_queues)
814 for (i = 0; i < dev->data->nb_rx_queues; i++)
815 fm10k_dev_rx_queue_stop(dev, i);
819 fm10k_dev_queue_release(struct rte_eth_dev *dev)
823 PMD_INIT_FUNC_TRACE();
825 if (dev->data->tx_queues) {
826 for (i = 0; i < dev->data->nb_tx_queues; i++)
827 fm10k_tx_queue_release(dev->data->tx_queues[i]);
830 if (dev->data->rx_queues) {
831 for (i = 0; i < dev->data->nb_rx_queues; i++)
832 fm10k_rx_queue_release(dev->data->rx_queues[i]);
837 fm10k_dev_close(struct rte_eth_dev *dev)
839 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
841 PMD_INIT_FUNC_TRACE();
843 fm10k_MACVLAN_remove_all(dev);
845 /* Stop mailbox service first */
846 fm10k_close_mbx_service(hw);
848 fm10k_dev_queue_release(dev);
853 fm10k_link_update(struct rte_eth_dev *dev,
854 __rte_unused int wait_to_complete)
856 PMD_INIT_FUNC_TRACE();
858 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
859 * x8 PCIe interface. For now, we leave the speed undefined since there
860 * is no 50Gbps Ethernet. */
861 dev->data->dev_link.link_speed = 0;
862 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
863 dev->data->dev_link.link_status = 1;
869 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
871 uint64_t ipackets, opackets, ibytes, obytes;
872 struct fm10k_hw *hw =
873 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
874 struct fm10k_hw_stats *hw_stats =
875 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
878 PMD_INIT_FUNC_TRACE();
880 fm10k_update_hw_stats(hw, hw_stats);
882 ipackets = opackets = ibytes = obytes = 0;
883 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
884 (i < hw->mac.max_queues); ++i) {
885 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
886 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
887 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
888 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
889 ipackets += stats->q_ipackets[i];
890 opackets += stats->q_opackets[i];
891 ibytes += stats->q_ibytes[i];
892 obytes += stats->q_obytes[i];
894 stats->ipackets = ipackets;
895 stats->opackets = opackets;
896 stats->ibytes = ibytes;
897 stats->obytes = obytes;
901 fm10k_stats_reset(struct rte_eth_dev *dev)
903 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
904 struct fm10k_hw_stats *hw_stats =
905 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
907 PMD_INIT_FUNC_TRACE();
909 memset(hw_stats, 0, sizeof(*hw_stats));
910 fm10k_rebind_hw_stats(hw, hw_stats);
914 fm10k_dev_infos_get(struct rte_eth_dev *dev,
915 struct rte_eth_dev_info *dev_info)
917 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
919 PMD_INIT_FUNC_TRACE();
921 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
922 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
923 dev_info->max_rx_queues = hw->mac.max_queues;
924 dev_info->max_tx_queues = hw->mac.max_queues;
925 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
926 dev_info->max_hash_mac_addrs = 0;
927 dev_info->max_vfs = dev->pci_dev->max_vfs;
928 dev_info->max_vmdq_pools = ETH_64_POOLS;
929 dev_info->rx_offload_capa =
930 DEV_RX_OFFLOAD_VLAN_STRIP |
931 DEV_RX_OFFLOAD_IPV4_CKSUM |
932 DEV_RX_OFFLOAD_UDP_CKSUM |
933 DEV_RX_OFFLOAD_TCP_CKSUM;
934 dev_info->tx_offload_capa =
935 DEV_TX_OFFLOAD_VLAN_INSERT |
936 DEV_TX_OFFLOAD_IPV4_CKSUM |
937 DEV_TX_OFFLOAD_UDP_CKSUM |
938 DEV_TX_OFFLOAD_TCP_CKSUM;
940 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
941 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
943 dev_info->default_rxconf = (struct rte_eth_rxconf) {
945 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
946 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
947 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
949 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
953 dev_info->default_txconf = (struct rte_eth_txconf) {
955 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
956 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
957 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
959 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
960 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
961 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
962 ETH_TXQ_FLAGS_NOOFFLOADS,
968 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
971 uint16_t mac_num = 0;
972 uint32_t vid_idx, vid_bit, mac_index;
974 struct fm10k_macvlan_filter_info *macvlan;
975 struct rte_eth_dev_data *data = dev->data;
977 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
978 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
980 if (vlan_id > ETH_VLAN_ID_MAX) {
981 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
985 vid_idx = FM10K_VFTA_IDX(vlan_id);
986 vid_bit = FM10K_VFTA_BIT(vlan_id);
987 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
988 if (on && (macvlan->vfta[vid_idx] & vid_bit))
990 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
991 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
992 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
993 "in the VLAN filter table");
998 result = fm10k_update_vlan(hw, vlan_id, 0, on);
999 fm10k_mbx_unlock(hw);
1000 if (result != FM10K_SUCCESS) {
1001 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1005 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1006 (result == FM10K_SUCCESS); mac_index++) {
1007 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1009 if (mac_num > macvlan->mac_num - 1) {
1010 PMD_INIT_LOG(ERR, "MAC address number "
1015 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1016 data->mac_addrs[mac_index].addr_bytes,
1018 fm10k_mbx_unlock(hw);
1021 if (result != FM10K_SUCCESS) {
1022 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1027 macvlan->vlan_num++;
1028 macvlan->vfta[vid_idx] |= vid_bit;
1030 macvlan->vlan_num--;
1031 macvlan->vfta[vid_idx] &= ~vid_bit;
1037 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1039 if (mask & ETH_VLAN_STRIP_MASK) {
1040 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1041 PMD_INIT_LOG(ERR, "VLAN stripping is "
1042 "always on in fm10k");
1045 if (mask & ETH_VLAN_EXTEND_MASK) {
1046 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1047 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1048 "supported in fm10k");
1051 if (mask & ETH_VLAN_FILTER_MASK) {
1052 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1053 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1057 /* Add/Remove a MAC address, and update filters */
1059 fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add)
1062 struct fm10k_hw *hw;
1063 struct fm10k_macvlan_filter_info *macvlan;
1065 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1066 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1069 for (j = 0; j < FM10K_VFTA_SIZE; j++) {
1070 if (macvlan->vfta[j]) {
1071 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1072 if (macvlan->vfta[j] & (1 << k)) {
1073 if (i + 1 > macvlan->vlan_num) {
1074 PMD_INIT_LOG(ERR, "vlan number "
1079 fm10k_update_uc_addr(hw,
1080 hw->mac.dglort_map, mac,
1081 j * FM10K_UINT32_BIT_SIZE + k,
1083 fm10k_mbx_unlock(hw);
1096 /* Add a MAC address, and update filters */
1098 fm10k_macaddr_add(struct rte_eth_dev *dev,
1099 struct ether_addr *mac_addr,
1100 __rte_unused uint32_t index,
1101 __rte_unused uint32_t pool)
1103 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE);
1106 /* Remove a MAC address, and update filters */
1108 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1110 struct rte_eth_dev_data *data = dev->data;
1112 if (index < FM10K_MAX_MACADDR_NUM)
1113 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1117 /* Remove all VLAN and MAC address table entries */
1119 fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev)
1122 struct fm10k_macvlan_filter_info *macvlan;
1124 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1125 for (j = 0; j < FM10K_VFTA_SIZE; j++) {
1126 if (macvlan->vfta[j]) {
1127 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1128 if (macvlan->vfta[j] & (1 << k))
1129 fm10k_vlan_filter_set(dev,
1130 j * FM10K_UINT32_BIT_SIZE + k, false);
1137 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1139 if ((request < min) || (request > max) || ((request % mult) != 0))
1146 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
1147 * the physical address is required. If the memzone is already created, then
1148 * this function returns a pointer to the existing memzone.
1150 static inline const struct rte_memzone *
1151 allocate_hw_ring(const char *driver_name, const char *ring_name,
1152 uint8_t port_id, uint16_t queue_id, int socket_id,
1153 uint32_t size, uint32_t align)
1155 char name[RTE_MEMZONE_NAMESIZE];
1156 const struct rte_memzone *mz;
1158 snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
1159 driver_name, ring_name, port_id, queue_id, socket_id);
1161 /* return the memzone if it already exists */
1162 mz = rte_memzone_lookup(name);
1166 #ifdef RTE_LIBRTE_XEN_DOM0
1167 return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
1170 return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
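/*
 * Example call (a sketch mirroring the Rx path below): the maximum ring
 * size is requested up front, so a later setup of the same queue with a
 * larger nb_desc is satisfied by the rte_memzone_lookup() early return
 * above instead of a failing re-reservation.
 *
 *	mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
 *			dev->data->port_id, queue_id, socket_id,
 *			FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
 */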
1175 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1177 if ((request < min) || (request > max) || ((div % request) != 0))
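/*
 * Example with hypothetical values: check_thresh(1, 64, 128, 32) passes
 * since 1 <= 32 <= 64 and 128 % 32 == 0, while a request of 24 fails the
 * divisor test because 128 % 24 != 0.
 */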
1184 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1186 uint16_t rx_free_thresh;
1188 if (conf->rx_free_thresh == 0)
1189 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1191 rx_free_thresh = conf->rx_free_thresh;
1193 /* make sure the requested threshold satisfies the constraints */
1194 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1195 FM10K_RX_FREE_THRESH_MAX(q),
1196 FM10K_RX_FREE_THRESH_DIV(q),
1198 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1199 "less than or equal to %u, "
1200 "greater than or equal to %u, "
1201 "and a divisor of %u",
1202 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1203 FM10K_RX_FREE_THRESH_MIN(q),
1204 FM10K_RX_FREE_THRESH_DIV(q));
1208 q->alloc_thresh = rx_free_thresh;
1209 q->drop_en = conf->rx_drop_en;
1210 q->rx_deferred_start = conf->rx_deferred_start;
1216 * Hardware requires specific alignment for Rx packet buffers. At
1217 * least one of the following two conditions must be satisfied.
1218 * 1. Address is 512B aligned
1219 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1221 * As such, the driver may need to adjust the DMA address within the
1222 * buffer by up to 512B.
1224 * return 1 if the element size is valid, otherwise return 0.
1227 mempool_element_size_valid(struct rte_mempool *mp)
1231 /* elt_size includes mbuf header and headroom */
1232 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1233 RTE_PKTMBUF_HEADROOM;
1235 /* account for up to 512B of alignment */
1236 min_size -= FM10K_RX_DATABUF_ALIGN;
1238 /* sanity check for overflow */
1239 if (min_size > mp->elt_size)
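/*
 * Alignment sketch for the conditions above (editor's note, assuming the
 * MBUF_DMA_ADDR_DEFAULT() macro used by rx_queue_reset() satisfies
 * condition 1 by rounding up):
 *
 *	dma = RTE_ALIGN_CEIL(mb->buf_physaddr + RTE_PKTMBUF_HEADROOM,
 *			FM10K_RX_DATABUF_ALIGN);
 *
 * Rounding up can consume at most 511 B of the data room, which is exactly
 * the FM10K_RX_DATABUF_ALIGN reserve subtracted from min_size above.
 */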
1247 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1248 uint16_t nb_desc, unsigned int socket_id,
1249 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1251 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1252 struct fm10k_rx_queue *q;
1253 const struct rte_memzone *mz;
1255 PMD_INIT_FUNC_TRACE();
1257 /* make sure the mempool element size can account for alignment. */
1258 if (!mempool_element_size_valid(mp)) {
1259 PMD_INIT_LOG(ERR, "Error: mempool element size is too small");
1263 /* make sure a valid number of descriptors have been requested */
1264 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1265 FM10K_MULT_RX_DESC, nb_desc)) {
1266 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1267 "less than or equal to %"PRIu32", "
1268 "greater than or equal to %u, "
1269 "and a multiple of %u",
1270 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1271 FM10K_MULT_RX_DESC);
1276 * if this queue existed already, free the associated memory. The
1277 * queue cannot be reused in case we need to allocate memory on a
1278 * different socket than was previously used.
1280 if (dev->data->rx_queues[queue_id] != NULL) {
1281 rx_queue_free(dev->data->rx_queues[queue_id]);
1282 dev->data->rx_queues[queue_id] = NULL;
1285 /* allocate memory for the queue structure */
1286 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1289 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1295 q->nb_desc = nb_desc;
1296 q->port_id = dev->data->port_id;
1297 q->queue_id = queue_id;
1298 q->tail_ptr = (volatile uint32_t *)
1299 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1300 if (handle_rxconf(q, conf))
1303 /* allocate memory for the software ring */
1304 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1305 nb_desc * sizeof(struct rte_mbuf *),
1306 RTE_CACHE_LINE_SIZE, socket_id);
1307 if (q->sw_ring == NULL) {
1308 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1314 * allocate memory for the hardware descriptor ring. A memzone large
1315 * enough to hold the maximum ring size is requested to allow for
1316 * resizing in later calls to the queue setup function.
1318 mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
1319 dev->data->port_id, queue_id, socket_id,
1320 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
1322 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1323 rte_free(q->sw_ring);
1327 q->hw_ring = mz->addr;
1328 #ifdef RTE_LIBRTE_XEN_DOM0
1329 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1331 q->hw_ring_phys_addr = mz->phys_addr;
1334 dev->data->rx_queues[queue_id] = q;
1339 fm10k_rx_queue_release(void *queue)
1341 PMD_INIT_FUNC_TRACE();
1343 rx_queue_free(queue);
1347 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1349 uint16_t tx_free_thresh;
1350 uint16_t tx_rs_thresh;
1352 /* constraint macros require that tx_free_thresh is configured
1353 * before tx_rs_thresh */
1354 if (conf->tx_free_thresh == 0)
1355 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1357 tx_free_thresh = conf->tx_free_thresh;
1359 /* make sure the requested threshold satisfies the constraints */
1360 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1361 FM10K_TX_FREE_THRESH_MAX(q),
1362 FM10K_TX_FREE_THRESH_DIV(q),
1364 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1365 "less than or equal to %u, "
1366 "greater than or equal to %u, "
1367 "and a divisor of %u",
1368 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1369 FM10K_TX_FREE_THRESH_MIN(q),
1370 FM10K_TX_FREE_THRESH_DIV(q));
1374 q->free_thresh = tx_free_thresh;
1376 if (conf->tx_rs_thresh == 0)
1377 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1379 tx_rs_thresh = conf->tx_rs_thresh;
1381 q->tx_deferred_start = conf->tx_deferred_start;
1383 /* make sure the requested threshold satisfies the constraints */
1384 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1385 FM10K_TX_RS_THRESH_MAX(q),
1386 FM10K_TX_RS_THRESH_DIV(q),
1388 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1389 "less than or equal to %u, "
1390 "greater than or equal to %u, "
1391 "and a divisor of %u",
1392 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1393 FM10K_TX_RS_THRESH_MIN(q),
1394 FM10K_TX_RS_THRESH_DIV(q));
1398 q->rs_thresh = tx_rs_thresh;
1404 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1405 uint16_t nb_desc, unsigned int socket_id,
1406 const struct rte_eth_txconf *conf)
1408 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1409 struct fm10k_tx_queue *q;
1410 const struct rte_memzone *mz;
1412 PMD_INIT_FUNC_TRACE();
1414 /* make sure a valid number of descriptors have been requested */
1415 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1416 FM10K_MULT_TX_DESC, nb_desc)) {
1417 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1418 "less than or equal to %"PRIu32", "
1419 "greater than or equal to %u, "
1420 "and a multiple of %u",
1421 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1422 FM10K_MULT_TX_DESC);
1427 * if this queue existed already, free the associated memory. The
1428 * queue cannot be reused in case we need to allocate memory on a
1429 * different socket than was previously used.
1431 if (dev->data->tx_queues[queue_id] != NULL) {
1432 tx_queue_free(dev->data->tx_queues[queue_id]);
1433 dev->data->tx_queues[queue_id] = NULL;
1436 /* allocate memory for the queue structure */
1437 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1440 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1445 q->nb_desc = nb_desc;
1446 q->port_id = dev->data->port_id;
1447 q->queue_id = queue_id;
1448 q->tail_ptr = (volatile uint32_t *)
1449 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1450 if (handle_txconf(q, conf))
1453 /* allocate memory for the software ring */
1454 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1455 nb_desc * sizeof(struct rte_mbuf *),
1456 RTE_CACHE_LINE_SIZE, socket_id);
1457 if (q->sw_ring == NULL) {
1458 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1464 * allocate memory for the hardware descriptor ring. A memzone large
1465 * enough to hold the maximum ring size is requested to allow for
1466 * resizing in later calls to the queue setup function.
1468 mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1469 dev->data->port_id, queue_id, socket_id,
1470 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
1472 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1473 rte_free(q->sw_ring);
1477 q->hw_ring = mz->addr;
1478 #ifdef RTE_LIBRTE_XEN_DOM0
1479 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1481 q->hw_ring_phys_addr = mz->phys_addr;
1485 * allocate memory for the RS bit tracker. Enough slots to hold the
1486 * descriptor index for each RS bit needing to be set are required.
1488 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1489 ((nb_desc + 1) / q->rs_thresh) *
1491 RTE_CACHE_LINE_SIZE, socket_id);
1492 if (q->rs_tracker.list == NULL) {
1493 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1494 rte_free(q->sw_ring);
1499 dev->data->tx_queues[queue_id] = q;
1504 fm10k_tx_queue_release(void *queue)
1506 PMD_INIT_FUNC_TRACE();
1508 tx_queue_free(queue);
1512 fm10k_reta_update(struct rte_eth_dev *dev,
1513 struct rte_eth_rss_reta_entry64 *reta_conf,
1516 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1517 uint16_t i, j, idx, shift;
1521 PMD_INIT_FUNC_TRACE();
1523 if (reta_size > FM10K_MAX_RSS_INDICES) {
1524 PMD_INIT_LOG(ERR, "The configured hash lookup table size "
1525 "(%d) doesn't match what the hardware can support "
1526 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1531 * Update Redirection Table RETA[n], n=0..31. The redirection table has
1532 * 128 entries in 32 registers
1534 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1535 idx = i / RTE_RETA_GROUP_SIZE;
1536 shift = i % RTE_RETA_GROUP_SIZE;
1537 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1538 BIT_MASK_PER_UINT32);
1543 if (mask != BIT_MASK_PER_UINT32)
1544 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1546 for (j = 0; j < CHARS_PER_UINT32; j++) {
1547 if (mask & (0x1 << j)) {
1549 reta &= ~(UINT8_MAX << CHAR_BIT * j);
1550 reta |= reta_conf[idx].reta[shift + j] <<
1554 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
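/*
 * Read-modify-write example for the loop above (hypothetical values): if
 * only bits 0 and 2 of the mask are set for a register, mask == 0x5 !=
 * BIT_MASK_PER_UINT32, so the register is first read back and only bytes
 * 0 and 2 are replaced; entries 1 and 3 keep their previous queue ids.
 */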
1561 fm10k_reta_query(struct rte_eth_dev *dev,
1562 struct rte_eth_rss_reta_entry64 *reta_conf,
1565 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1566 uint16_t i, j, idx, shift;
1570 PMD_INIT_FUNC_TRACE();
1572 if (reta_size < FM10K_MAX_RSS_INDICES) {
1573 PMD_INIT_LOG(ERR, "The configured hash lookup table size "
1574 "(%d) doesn't match what the hardware can support "
1575 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1580 * Read Redirection Table RETA[n], n=0..31. The redirection table has
1581 * 128 entries in 32 registers
1583 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1584 idx = i / RTE_RETA_GROUP_SIZE;
1585 shift = i % RTE_RETA_GROUP_SIZE;
1586 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1587 BIT_MASK_PER_UINT32);
1591 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1592 for (j = 0; j < CHARS_PER_UINT32; j++) {
1593 if (mask & (0x1 << j))
1594 reta_conf[idx].reta[shift + j] = ((reta >>
1595 CHAR_BIT * j) & UINT8_MAX);
1603 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1604 struct rte_eth_rss_conf *rss_conf)
1606 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1607 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1609 uint64_t hf = rss_conf->rss_hf;
1612 PMD_INIT_FUNC_TRACE();
1614 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1615 FM10K_RSSRK_ENTRIES_PER_REG)
1622 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
1623 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
1624 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
1625 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
1626 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
1627 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
1628 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
1629 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
1630 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
1632 /* If the mapping doesn't match any supported hash type, return */
1637 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1638 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1640 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
1646 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1647 struct rte_eth_rss_conf *rss_conf)
1649 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1650 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1655 PMD_INIT_FUNC_TRACE();
1657 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1658 FM10K_RSSRK_ENTRIES_PER_REG)
1662 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1663 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1665 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
1667 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
1668 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
1669 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
1670 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
1671 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
1672 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
1673 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
1674 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
1675 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
1677 rss_conf->rss_hf = hf;
1683 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
1685 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1686 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1688 /* Bind all local non-queue interrupts to vector 0 */
1691 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1692 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1693 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1694 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1695 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1696 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1698 /* Enable misc causes */
1699 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
1700 FM10K_EIMR_ENABLE(THI_FAULT) |
1701 FM10K_EIMR_ENABLE(FUM_FAULT) |
1702 FM10K_EIMR_ENABLE(MAILBOX) |
1703 FM10K_EIMR_ENABLE(SWITCHREADY) |
1704 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
1705 FM10K_EIMR_ENABLE(SRAMERROR) |
1706 FM10K_EIMR_ENABLE(VFLR));
1709 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1710 FM10K_ITR_MASK_CLEAR);
1711 FM10K_WRITE_FLUSH(hw);
1715 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
1717 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1718 uint32_t int_map = FM10K_INT_MAP_DISABLE;
1722 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1723 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1724 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1725 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1726 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1727 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1729 /* Disable misc causes */
1730 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
1731 FM10K_EIMR_DISABLE(THI_FAULT) |
1732 FM10K_EIMR_DISABLE(FUM_FAULT) |
1733 FM10K_EIMR_DISABLE(MAILBOX) |
1734 FM10K_EIMR_DISABLE(SWITCHREADY) |
1735 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
1736 FM10K_EIMR_DISABLE(SRAMERROR) |
1737 FM10K_EIMR_DISABLE(VFLR));
1740 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
1741 FM10K_WRITE_FLUSH(hw);
1745 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
1747 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1748 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1750 /* Bind all local non-queue interrupts to vector 0 */
1753 /* Only INT 0 is available; the other 15 are reserved. */
1754 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
1757 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1758 FM10K_ITR_MASK_CLEAR);
1759 FM10K_WRITE_FLUSH(hw);
1763 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
1765 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1766 uint32_t int_map = FM10K_INT_MAP_DISABLE;
1770 /* Only INT 0 is available; the other 15 are reserved. */
1771 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
1774 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
1775 FM10K_WRITE_FLUSH(hw);
1779 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
1781 struct fm10k_fault fault;
1783 const char *estr = "Unknown error";
1785 /* Process PCA fault */
1786 if (eicr & FM10K_EICR_PCA_FAULT) {
1787 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
1790 switch (fault.type) {
1792 estr = "PCA_NO_FAULT"; break;
1793 case PCA_UNMAPPED_ADDR:
1794 estr = "PCA_UNMAPPED_ADDR"; break;
1795 case PCA_BAD_QACCESS_PF:
1796 estr = "PCA_BAD_QACCESS_PF"; break;
1797 case PCA_BAD_QACCESS_VF:
1798 estr = "PCA_BAD_QACCESS_VF"; break;
1799 case PCA_MALICIOUS_REQ:
1800 estr = "PCA_MALICIOUS_REQ"; break;
1801 case PCA_POISONED_TLP:
1802 estr = "PCA_POISONED_TLP"; break;
1804 estr = "PCA_TLP_ABORT"; break;
1808 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1809 estr, fault.func ? "VF" : "PF", fault.func,
1810 fault.address, fault.specinfo);
1813 /* Process THI fault */
1814 if (eicr & FM10K_EICR_THI_FAULT) {
1815 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
1818 switch (fault.type) {
1820 estr = "THI_NO_FAULT"; break;
1821 case THI_MAL_DIS_Q_FAULT:
1822 estr = "THI_MAL_DIS_Q_FAULT"; break;
1826 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1827 estr, fault.func ? "VF" : "PF", fault.func,
1828 fault.address, fault.specinfo);
1831 /* Process FUM fault */
1832 if (eicr & FM10K_EICR_FUM_FAULT) {
1833 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
1836 switch (fault.type) {
1838 estr = "FUM_NO_FAULT"; break;
1839 case FUM_UNMAPPED_ADDR:
1840 estr = "FUM_UNMAPPED_ADDR"; break;
1841 case FUM_POISONED_TLP:
1842 estr = "FUM_POISONED_TLP"; break;
1843 case FUM_BAD_VF_QACCESS:
1844 estr = "FUM_BAD_VF_QACCESS"; break;
1845 case FUM_ADD_DECODE_ERR:
1846 estr = "FUM_ADD_DECODE_ERR"; break;
1848 estr = "FUM_RO_ERROR"; break;
1849 case FUM_QPRC_CRC_ERROR:
1850 estr = "FUM_QPRC_CRC_ERROR"; break;
1851 case FUM_CSR_TIMEOUT:
1852 estr = "FUM_CSR_TIMEOUT"; break;
1853 case FUM_INVALID_TYPE:
1854 estr = "FUM_INVALID_TYPE"; break;
1855 case FUM_INVALID_LENGTH:
1856 estr = "FUM_INVALID_LENGTH"; break;
1857 case FUM_INVALID_BE:
1858 estr = "FUM_INVALID_BE"; break;
1859 case FUM_INVALID_ALIGN:
1860 estr = "FUM_INVALID_ALIGN"; break;
1864 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1865 estr, fault.func ? "VF" : "PF", fault.func,
1866 fault.address, fault.specinfo);
1871 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
1876 * PF interrupt handler triggered by NIC for handling specific interrupt.
1879 * Pointer to interrupt handle.
1881 * The address of parameter (struct rte_eth_dev *) registered before.
1887 fm10k_dev_interrupt_handler_pf(
1888 __rte_unused struct rte_intr_handle *handle,
1891 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1892 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1893 uint32_t cause, status;
1895 if (hw->mac.type != fm10k_mac_pf)
1898 cause = FM10K_READ_REG(hw, FM10K_EICR);
1900 /* Handle PCI fault cases */
1901 if (cause & FM10K_EICR_FAULT_MASK) {
1902 PMD_INIT_LOG(ERR, "INT: fault detected!");
1903 fm10k_dev_handle_fault(hw, cause);
1906 /* Handle switch up/down */
1907 if (cause & FM10K_EICR_SWITCHNOTREADY)
1908 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
1910 if (cause & FM10K_EICR_SWITCHREADY)
1911 PMD_INIT_LOG(INFO, "INT: Switch is ready");
1913 /* Handle mailbox message */
1915 hw->mbx.ops.process(hw, &hw->mbx);
1916 fm10k_mbx_unlock(hw);
1918 /* Handle SRAM error */
1919 if (cause & FM10K_EICR_SRAMERROR) {
1920 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
1922 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
1923 /* Write to clear pending bits */
1924 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
1926 /* Todo: print out error message after shared code updates */
1929 /* Clear these 3 events if any are pending */
1930 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
1931 FM10K_EICR_SWITCHREADY;
1933 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
1935 /* Re-enable interrupt from device side */
1936 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1937 FM10K_ITR_MASK_CLEAR);
1938 /* Re-enable interrupt from host side */
1939 rte_intr_enable(&(dev->pci_dev->intr_handle));
1943 * VF interrupt handler triggered by NIC for handling specific interrupt.
1946 * Pointer to interrupt handle.
1948 * The address of parameter (struct rte_eth_dev *) registered before.
1954 fm10k_dev_interrupt_handler_vf(
1955 __rte_unused struct rte_intr_handle *handle,
1958 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1959 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1961 if (hw->mac.type != fm10k_mac_vf)
1964 /* Handle mailbox message if lock is acquired */
1966 hw->mbx.ops.process(hw, &hw->mbx);
1967 fm10k_mbx_unlock(hw);
1969 /* Re-enable interrupt from device side */
1970 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1971 FM10K_ITR_MASK_CLEAR);
1972 /* Re-enable interrupt from host side */
1973 rte_intr_enable(&(dev->pci_dev->intr_handle));
1976 /* Mailbox message handler in VF */
1977 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
1978 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1979 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
1980 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
1981 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1984 /* Mailbox message handler in PF */
1985 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
1986 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1987 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1988 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1989 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1990 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1991 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1992 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1996 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2000 /* Initialize mailbox lock */
2001 fm10k_mbx_initlock(hw);
2003 /* Replace default message handler with new ones */
2004 if (hw->mac.type == fm10k_mac_pf)
2005 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
2007 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2010 PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
2014 /* Connect to SM for PF device or PF for VF device */
2015 return hw->mbx.ops.connect(hw, &hw->mbx);
2019 fm10k_close_mbx_service(struct fm10k_hw *hw)
2021 /* Disconnect from SM for PF device or PF for VF device */
2022 hw->mbx.ops.disconnect(hw, &hw->mbx);
2025 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2026 .dev_configure = fm10k_dev_configure,
2027 .dev_start = fm10k_dev_start,
2028 .dev_stop = fm10k_dev_stop,
2029 .dev_close = fm10k_dev_close,
2030 .promiscuous_enable = fm10k_dev_promiscuous_enable,
2031 .promiscuous_disable = fm10k_dev_promiscuous_disable,
2032 .allmulticast_enable = fm10k_dev_allmulticast_enable,
2033 .allmulticast_disable = fm10k_dev_allmulticast_disable,
2034 .stats_get = fm10k_stats_get,
2035 .stats_reset = fm10k_stats_reset,
2036 .link_update = fm10k_link_update,
2037 .dev_infos_get = fm10k_dev_infos_get,
2038 .vlan_filter_set = fm10k_vlan_filter_set,
2039 .vlan_offload_set = fm10k_vlan_offload_set,
2040 .mac_addr_add = fm10k_macaddr_add,
2041 .mac_addr_remove = fm10k_macaddr_remove,
2042 .rx_queue_start = fm10k_dev_rx_queue_start,
2043 .rx_queue_stop = fm10k_dev_rx_queue_stop,
2044 .tx_queue_start = fm10k_dev_tx_queue_start,
2045 .tx_queue_stop = fm10k_dev_tx_queue_stop,
2046 .rx_queue_setup = fm10k_rx_queue_setup,
2047 .rx_queue_release = fm10k_rx_queue_release,
2048 .tx_queue_setup = fm10k_tx_queue_setup,
2049 .tx_queue_release = fm10k_tx_queue_release,
2050 .reta_update = fm10k_reta_update,
2051 .reta_query = fm10k_reta_query,
2052 .rss_hash_update = fm10k_rss_hash_update,
2053 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
2057 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2059 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2061 struct fm10k_macvlan_filter_info *macvlan;
2063 PMD_INIT_FUNC_TRACE();
2065 dev->dev_ops = &fm10k_eth_dev_ops;
2066 dev->rx_pkt_burst = &fm10k_recv_pkts;
2067 dev->tx_pkt_burst = &fm10k_xmit_pkts;
2069 if (dev->data->scattered_rx)
2070 dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
2072 /* only initialize in the primary process */
2073 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2076 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2077 memset(macvlan, 0, sizeof(*macvlan));
2078 /* Vendor and Device ID need to be set before init of shared code */
2079 memset(hw, 0, sizeof(*hw));
2080 hw->device_id = dev->pci_dev->id.device_id;
2081 hw->vendor_id = dev->pci_dev->id.vendor_id;
2082 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2083 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2084 hw->revision_id = 0;
2085 hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2086 if (hw->hw_addr == NULL) {
2087 PMD_INIT_LOG(ERR, "Bad mem resource."
2088 " Try to blacklist unused devices.");
2092 /* Store fm10k_adapter pointer */
2093 hw->back = dev->data->dev_private;
2095 /* Initialize the shared code */
2096 diag = fm10k_init_shared_code(hw);
2097 if (diag != FM10K_SUCCESS) {
2098 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2103 * Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2104 * there is no way to get link status without reading BAR4. Until this
2105 * works, assume we have maximum bandwidth.
2106 * @todo - fix bus info
2108 hw->bus_caps.speed = fm10k_bus_speed_8000;
2109 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2110 hw->bus_caps.payload = fm10k_bus_payload_512;
2111 hw->bus.speed = fm10k_bus_speed_8000;
2112 hw->bus.width = fm10k_bus_width_pcie_x8;
2113 hw->bus.payload = fm10k_bus_payload_256;
2115 /* Initialize the hw */
2116 diag = fm10k_init_hw(hw);
2117 if (diag != FM10K_SUCCESS) {
2118 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2122 /* Initialize MAC address(es) */
2123 dev->data->mac_addrs = rte_zmalloc("fm10k",
2124 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2125 if (dev->data->mac_addrs == NULL) {
2126 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2130 diag = fm10k_read_mac_addr(hw);
2132 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2133 &dev->data->mac_addrs[0]);
2135 if (diag != FM10K_SUCCESS ||
2136 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2138 /* Generate a random addr */
2139 eth_random_addr(hw->mac.addr);
2140 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
2141 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2142 &dev->data->mac_addrs[0]);
2145 /* Reset the hw statistics */
2146 fm10k_stats_reset(dev);
2149 diag = fm10k_reset_hw(hw);
2150 if (diag != FM10K_SUCCESS) {
2151 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2155 /* Setup mailbox service */
2156 diag = fm10k_setup_mbx_service(hw);
2157 if (diag != FM10K_SUCCESS) {
2158 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2162 /* PF/VF have different interrupt handling mechanisms */
2163 if (hw->mac.type == fm10k_mac_pf) {
2164 /* register callback func to eal lib */
2165 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2166 fm10k_dev_interrupt_handler_pf, (void *)dev);
2168 /* enable MISC interrupt */
2169 fm10k_dev_enable_intr_pf(dev);
2171 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2172 fm10k_dev_interrupt_handler_vf, (void *)dev);
2174 fm10k_dev_enable_intr_vf(dev);
2177 /* Enable uio intr after callback registered */
2178 rte_intr_enable(&(dev->pci_dev->intr_handle));
2180 hw->mac.ops.update_int_moderator(hw);
2182 /* Make sure Switch Manager is ready before going forward. */
2183 if (hw->mac.type == fm10k_mac_pf) {
2184 int switch_ready = 0;
2187 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2189 hw->mac.ops.get_host_state(hw, &switch_ready);
2190 fm10k_mbx_unlock(hw);
2193 /* Delay some time to acquire async LPORT_MAP info. */
2194 rte_delay_us(WAIT_SWITCH_MSG_US);
2197 if (switch_ready == 0) {
2198 PMD_INIT_LOG(ERR, "switch is not ready");
2204 * Below function will trigger operations on mailbox, acquire lock to
2205 * avoid race condition from interrupt handler. Operations on mailbox
2206 * FIFO will trigger interrupt to PF/SM, in which interrupt handler
2207 * will handle and generate an interrupt to our side. Then, FIFO in
2208 * mailbox will be touched.
2211 /* Enable port first */
2212 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2214 /* Set unicast mode by default. App can change to another mode in other
2217 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2218 FM10K_XCAST_MODE_NONE);
2220 fm10k_mbx_unlock(hw);
2222 /* Add default mac address */
2223 fm10k_MAC_filter_set(dev, hw->mac.addr, true);
2229 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
2231 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2233 PMD_INIT_FUNC_TRACE();
2235 /* only uninitialize in the primary process */
2236 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2239 /* safe to close dev here */
2240 fm10k_dev_close(dev);
2242 dev->dev_ops = NULL;
2243 dev->rx_pkt_burst = NULL;
2244 dev->tx_pkt_burst = NULL;
2246 /* disable uio/vfio intr */
2247 rte_intr_disable(&(dev->pci_dev->intr_handle));
2249 /* PF/VF have different interrupt handling mechanisms */
2250 if (hw->mac.type == fm10k_mac_pf) {
2251 /* disable interrupt */
2252 fm10k_dev_disable_intr_pf(dev);
2254 /* unregister callback func to eal lib */
2255 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2256 fm10k_dev_interrupt_handler_pf, (void *)dev);
2258 /* disable interrupt */
2259 fm10k_dev_disable_intr_vf(dev);
2261 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2262 fm10k_dev_interrupt_handler_vf, (void *)dev);
2265 /* free mac memory */
2266 if (dev->data->mac_addrs) {
2267 rte_free(dev->data->mac_addrs);
2268 dev->data->mac_addrs = NULL;
2271 memset(hw, 0, sizeof(*hw));
2277 * The set of PCI devices this driver supports. This driver will enable both PF
2278 * and SRIOV-VF devices.
2280 static const struct rte_pci_id pci_id_fm10k_map[] = {
2281 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2282 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2283 #include "rte_pci_dev_ids.h"
2284 { .vendor_id = 0, /* sentinel */ },
2287 static struct eth_driver rte_pmd_fm10k = {
2289 .name = "rte_pmd_fm10k",
2290 .id_table = pci_id_fm10k_map,
2291 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
2293 .eth_dev_init = eth_fm10k_dev_init,
2294 .eth_dev_uninit = eth_fm10k_dev_uninit,
2295 .dev_private_size = sizeof(struct fm10k_adapter),
2299 * Driver initialization routine.
2300 * Invoked once at EAL init time.
2301 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
2304 rte_pmd_fm10k_init(__rte_unused const char *name,
2305 __rte_unused const char *params)
2307 PMD_INIT_FUNC_TRACE();
2308 rte_eth_driver_register(&rte_pmd_fm10k);
2312 static struct rte_driver rte_fm10k_driver = {
2314 .init = rte_pmd_fm10k_init,
2317 PMD_REGISTER_DRIVER(rte_fm10k_driver);