4 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_memzone.h>
37 #include <rte_string_fns.h>
39 #include <rte_spinlock.h>
42 #include "base/fm10k_api.h"
44 /* Default delay to acquire mailbox lock */
45 #define FM10K_MBXLOCK_DELAY_US 20
46 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
48 /* Maximum number of attempts to query the switch status */
49 #define MAX_QUERY_SWITCH_STATE_TIMES 10
50 /* Wait interval to get switch status */
51 #define WAIT_SWITCH_MSG_US 100000
52 /* Number of chars per uint32 type */
53 #define CHARS_PER_UINT32 (sizeof(uint32_t))
54 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
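/* Illustrative note (sketch, not driver logic): with sizeof(uint32_t) == 4,
 * CHARS_PER_UINT32 is 4 and BIT_MASK_PER_UINT32 expands to 0xF, i.e. a 4-bit
 * mask covering the four RETA entries packed into each 32-bit register, as
 * used in the RETA update/query paths below.
 */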
56 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
57 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
58 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
59 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
60 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
61 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
63 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
65 fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add);
67 fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev);
68 static void fm10k_tx_queue_release(void *queue);
69 static void fm10k_rx_queue_release(void *queue);
72 fm10k_mbx_initlock(struct fm10k_hw *hw)
74 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
78 fm10k_mbx_lock(struct fm10k_hw *hw)
80 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
81 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
85 fm10k_mbx_unlock(struct fm10k_hw *hw)
87 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
91 * reset queue to initial state, allocate software buffers used when starting
94 * return -ENOMEM if buffers cannot be allocated
95 * return -EINVAL if buffers do not satisfy alignment condition
98 rx_queue_reset(struct fm10k_rx_queue *q)
102 PMD_INIT_FUNC_TRACE();
104 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
108 for (i = 0; i < q->nb_desc; ++i) {
109 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
110 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
111 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
115 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
116 q->hw_ring[i].q.pkt_addr = dma_addr;
117 q->hw_ring[i].q.hdr_addr = dma_addr;
122 q->next_trigger = q->alloc_thresh - 1;
123 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
128 * clean queue, descriptor rings, free software buffers used when stopping
132 rx_queue_clean(struct fm10k_rx_queue *q)
134 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
136 PMD_INIT_FUNC_TRACE();
138 /* zero descriptor rings */
139 for (i = 0; i < q->nb_desc; ++i)
140 q->hw_ring[i] = zero;
142 /* free software buffers */
143 for (i = 0; i < q->nb_desc; ++i) {
145 rte_pktmbuf_free_seg(q->sw_ring[i]);
146 q->sw_ring[i] = NULL;
152 * free all queue memory; used when releasing the queue (e.g. during re-configuration)
155 rx_queue_free(struct fm10k_rx_queue *q)
157 PMD_INIT_FUNC_TRACE();
159 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
162 rte_free(q->sw_ring);
171 * disable RX queue, wait until HW finishes the necessary flush operation
174 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
178 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
179 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
180 reg & ~FM10K_RXQCTL_ENABLE);
182 /* Wait 100us at most */
183 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
185 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
186 if (!(reg & FM10K_RXQCTL_ENABLE))
190 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
197 * reset queue to initial state, allocate software buffers used when starting
201 tx_queue_reset(struct fm10k_tx_queue *q)
203 PMD_INIT_FUNC_TRACE();
207 q->nb_free = q->nb_desc - 1;
208 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
209 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
213 * clean queue, descriptor rings, free software buffers used when stopping
217 tx_queue_clean(struct fm10k_tx_queue *q)
219 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
221 PMD_INIT_FUNC_TRACE();
223 /* zero descriptor rings */
224 for (i = 0; i < q->nb_desc; ++i)
225 q->hw_ring[i] = zero;
227 /* free software buffers */
228 for (i = 0; i < q->nb_desc; ++i) {
230 rte_pktmbuf_free_seg(q->sw_ring[i]);
231 q->sw_ring[i] = NULL;
237 * free all queue memory; used when releasing the queue (e.g. during re-configuration)
240 tx_queue_free(struct fm10k_tx_queue *q)
242 PMD_INIT_FUNC_TRACE();
244 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
246 if (q->rs_tracker.list) {
247 rte_free(q->rs_tracker.list);
248 q->rs_tracker.list = NULL;
251 rte_free(q->sw_ring);
260 * disable TX queue, wait until HW finishes the necessary flush operation
263 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
267 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
268 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
269 reg & ~FM10K_TXDCTL_ENABLE);
271 /* Wait 100us at most */
272 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
274 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
275 if (!(reg & FM10K_TXDCTL_ENABLE))
279 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
286 fm10k_dev_configure(struct rte_eth_dev *dev)
288 PMD_INIT_FUNC_TRACE();
290 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
291 PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
297 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
299 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
300 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
301 uint32_t mrqc, *key, i, reta, j;
304 #define RSS_KEY_SIZE 40
305 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
306 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
307 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
308 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
309 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
310 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
313 if (dev->data->nb_rx_queues == 1 ||
314 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
315 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
318 /* random key is rss_intel_key (default) or user provided (rss_key) */
319 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
320 key = (uint32_t *)rss_intel_key;
322 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
324 /* Now fill our hash function seeds, 4 bytes at a time */
325 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
326 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
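/* Result sketch: the 40-byte key fills RSSRK registers 0..9, i.e.
 * RSS_KEY_SIZE / sizeof(*key) == 10 separate 32-bit writes.
 */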
329 * Fill in redirection table
330 * The byte-swap is needed because NIC registers are in
331 * little-endian order.
334 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
335 if (j == dev->data->nb_rx_queues)
337 reta = (reta << CHAR_BIT) | j;
339 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
344 * Generate RSS hash based on packet types, TCP/UDP
345 * port numbers and/or IPv4/v6 src and dst addresses
347 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
349 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
350 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
351 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
352 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
353 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
354 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
355 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
356 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
357 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
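/* Worked example (sketch): rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP
 * maps to mrqc = FM10K_MRQC_IPV4 | FM10K_MRQC_TCP_IPV4; any hf bits outside
 * the list above contribute nothing to mrqc.
 */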
360 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
365 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
369 fm10k_dev_tx_init(struct rte_eth_dev *dev)
371 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
373 struct fm10k_tx_queue *txq;
377 /* Disable TXINT to avoid possible interrupt */
378 for (i = 0; i < hw->mac.max_queues; i++)
379 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
380 3 << FM10K_TXINT_TIMER_SHIFT);
383 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
384 txq = dev->data->tx_queues[i];
385 base_addr = txq->hw_ring_phys_addr;
386 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
388 /* disable queue to avoid issues while updating state */
389 ret = tx_queue_disable(hw, i);
391 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
395 /* set location and size for descriptor ring */
396 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
397 base_addr & UINT64_LOWER_32BITS_MASK);
398 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
399 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
400 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
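/* Address split sketch: a ring at physical address 0x123456000 programs
 * TDBAL = 0x23456000 (low 32 bits) and TDBAH = 0x1 (upper 32 bits).
 */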
406 fm10k_dev_rx_init(struct rte_eth_dev *dev)
408 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
410 struct fm10k_rx_queue *rxq;
413 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
416 /* Disable RXINT to avoid possible interrupt */
417 for (i = 0; i < hw->mac.max_queues; i++)
418 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
419 3 << FM10K_RXINT_TIMER_SHIFT);
421 /* Setup RX queues */
422 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
423 rxq = dev->data->rx_queues[i];
424 base_addr = rxq->hw_ring_phys_addr;
425 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
427 /* disable queue to avoid issues while updating state */
428 ret = rx_queue_disable(hw, i);
430 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
434 /* Setup the Base and Length of the Rx Descriptor Ring */
435 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
436 base_addr & UINT64_LOWER_32BITS_MASK);
437 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
438 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
439 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
441 /* Configure the Rx buffer size for one buff without split */
442 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
443 RTE_PKTMBUF_HEADROOM);
444 /* As RX buffer is aligned to 512B within mbuf, some bytes are
445 * reserved for this purpose, and the worst case could be 511B.
446 * But SRR reg assumes all buffers have the same size. In order
447 * to fill the gap, we'll have to consider the worst case and
448 * assume 512B is reserved. If we don't do so, it's possible
449 * for HW to overwrite data to next mbuf.
451 buf_size -= FM10K_RX_DATABUF_ALIGN;
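/* Numeric sketch (assuming a typical 2048B mbuf data room and the default
 * 128B RTE_PKTMBUF_HEADROOM): buf_size is now 2048 - 128 - 512 = 1408 bytes
 * of usable packet data per buffer.
 */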
453 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
454 buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
456 /* Add dual VLAN tag length to support double-tagged (QinQ) frames */
457 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
458 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
459 dev->data->dev_conf.rxmode.enable_scatter) {
461 dev->data->scattered_rx = 1;
462 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
463 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
464 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
465 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
468 /* Enable drop on empty, it's RO for VF */
469 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
470 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
472 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
473 FM10K_WRITE_FLUSH(hw);
476 /* Configure RSS if applicable */
477 fm10k_dev_mq_rx_configure(dev);
482 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
484 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
487 struct fm10k_rx_queue *rxq;
489 PMD_INIT_FUNC_TRACE();
491 if (rx_queue_id < dev->data->nb_rx_queues) {
492 rxq = dev->data->rx_queues[rx_queue_id];
493 err = rx_queue_reset(rxq);
494 if (err == -ENOMEM) {
495 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
497 } else if (err == -EINVAL) {
498 PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"
503 /* Setup the HW Rx Head and Tail Descriptor Pointers
504 * Note: this must be done AFTER the queue is enabled on real
505 * hardware, but BEFORE the queue is enabled when using the
506 * emulation platform. Do it in both places for now and remove
507 * this comment and the following two register writes when the
508 * emulation platform is no longer being used.
510 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
511 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
513 /* Set PF ownership flag for PF devices */
514 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
515 if (hw->mac.type == fm10k_mac_pf)
516 reg |= FM10K_RXQCTL_PF;
517 reg |= FM10K_RXQCTL_ENABLE;
518 /* enable RX queue */
519 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
520 FM10K_WRITE_FLUSH(hw);
522 /* Setup the HW Rx Head and Tail Descriptor Pointers
523 * Note: this must be done AFTER the queue is enabled
525 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
526 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
533 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
535 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
537 PMD_INIT_FUNC_TRACE();
539 if (rx_queue_id < dev->data->nb_rx_queues) {
540 /* Disable RX queue */
541 rx_queue_disable(hw, rx_queue_id);
543 /* Free mbuf and clean HW ring */
544 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
551 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
553 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
554 /** @todo - this should be defined in the shared code */
555 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
556 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
559 PMD_INIT_FUNC_TRACE();
561 if (tx_queue_id < dev->data->nb_tx_queues) {
562 tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
564 /* reset head and tail pointers */
565 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
566 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
568 /* enable TX queue */
569 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
570 FM10K_TXDCTL_ENABLE | txdctl);
571 FM10K_WRITE_FLUSH(hw);
579 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
581 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
583 PMD_INIT_FUNC_TRACE();
585 if (tx_queue_id < dev->data->nb_tx_queues) {
586 tx_queue_disable(hw, tx_queue_id);
587 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
593 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
595 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
596 != FM10K_DGLORTMAP_NONE);
600 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
602 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
605 PMD_INIT_FUNC_TRACE();
607 /* Return if a valid glort range has not been acquired */
608 if (!fm10k_glort_valid(hw))
612 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
613 FM10K_XCAST_MODE_PROMISC);
614 fm10k_mbx_unlock(hw);
616 if (status != FM10K_SUCCESS)
617 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
621 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
623 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
627 PMD_INIT_FUNC_TRACE();
629 /* Return if a valid glort range has not been acquired */
630 if (!fm10k_glort_valid(hw))
633 if (dev->data->all_multicast == 1)
634 mode = FM10K_XCAST_MODE_ALLMULTI;
636 mode = FM10K_XCAST_MODE_NONE;
639 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
641 fm10k_mbx_unlock(hw);
643 if (status != FM10K_SUCCESS)
644 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
648 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
650 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
653 PMD_INIT_FUNC_TRACE();
655 /* Return if a valid glort range has not been acquired */
656 if (!fm10k_glort_valid(hw))
659 /* If promiscuous mode is enabled, it doesn't make sense to enable
660 * allmulticast and disable promiscuous since fm10k can only select
663 if (dev->data->promiscuous) {
664 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
665 "needn't enable allmulticast");
670 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
671 FM10K_XCAST_MODE_ALLMULTI);
672 fm10k_mbx_unlock(hw);
674 if (status != FM10K_SUCCESS)
675 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
679 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
681 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
684 PMD_INIT_FUNC_TRACE();
686 /* Return if a valid glort range has not been acquired */
687 if (!fm10k_glort_valid(hw))
690 if (dev->data->promiscuous) {
691 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
692 "since promisc mode is enabled");
697 /* Change mode to unicast mode */
698 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
699 FM10K_XCAST_MODE_NONE);
700 fm10k_mbx_unlock(hw);
702 if (status != FM10K_SUCCESS)
703 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
706 /* fls = find last set bit = 32 minus the number of leading zeros */
708 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
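/* Example values (sketch): fls(0) == 0, fls(1) == 1, fls(3) == 2, fls(8) == 4.
 * Below, fls(nb_rx_queues - 1) gives the number of RSS index bits needed to
 * address all configured RX queues.
 */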
710 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
712 fm10k_dev_start(struct rte_eth_dev *dev)
714 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
717 PMD_INIT_FUNC_TRACE();
719 /* stop, init, then start the hw */
720 diag = fm10k_stop_hw(hw);
721 if (diag != FM10K_SUCCESS) {
722 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
726 diag = fm10k_init_hw(hw);
727 if (diag != FM10K_SUCCESS) {
728 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
732 diag = fm10k_start_hw(hw);
733 if (diag != FM10K_SUCCESS) {
734 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
738 diag = fm10k_dev_tx_init(dev);
740 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
744 diag = fm10k_dev_rx_init(dev);
746 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
750 if (hw->mac.type == fm10k_mac_pf) {
751 /* Establish only VSI 0 as valid */
752 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
754 /* Configure RSS bits used in RETA table */
755 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
756 fls(dev->data->nb_rx_queues - 1) <<
757 FM10K_DGLORTDEC_RSSLENGTH_SHIFT);
759 /* Invalidate all other GLORT entries */
760 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
761 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
762 FM10K_DGLORTMAP_NONE);
765 for (i = 0; i < dev->data->nb_rx_queues; i++) {
766 struct fm10k_rx_queue *rxq;
767 rxq = dev->data->rx_queues[i];
769 if (rxq->rx_deferred_start)
771 diag = fm10k_dev_rx_queue_start(dev, i);
774 for (j = 0; j < i; ++j)
775 rx_queue_clean(dev->data->rx_queues[j]);
780 for (i = 0; i < dev->data->nb_tx_queues; i++) {
781 struct fm10k_tx_queue *txq;
782 txq = dev->data->tx_queues[i];
784 if (txq->tx_deferred_start)
786 diag = fm10k_dev_tx_queue_start(dev, i);
789 for (j = 0; j < dev->data->nb_rx_queues; ++j)
790 rx_queue_clean(dev->data->rx_queues[j]);
795 /* Update default vlan */
796 if (hw->mac.default_vid && hw->mac.default_vid <= ETHER_MAX_VLAN_ID)
797 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
803 fm10k_dev_stop(struct rte_eth_dev *dev)
807 PMD_INIT_FUNC_TRACE();
809 if (dev->data->tx_queues)
810 for (i = 0; i < dev->data->nb_tx_queues; i++)
811 fm10k_dev_tx_queue_stop(dev, i);
813 if (dev->data->rx_queues)
814 for (i = 0; i < dev->data->nb_rx_queues; i++)
815 fm10k_dev_rx_queue_stop(dev, i);
819 fm10k_dev_queue_release(struct rte_eth_dev *dev)
823 PMD_INIT_FUNC_TRACE();
825 if (dev->data->tx_queues) {
826 for (i = 0; i < dev->data->nb_tx_queues; i++)
827 fm10k_tx_queue_release(dev->data->tx_queues[i]);
830 if (dev->data->rx_queues) {
831 for (i = 0; i < dev->data->nb_rx_queues; i++)
832 fm10k_rx_queue_release(dev->data->rx_queues[i]);
837 fm10k_dev_close(struct rte_eth_dev *dev)
839 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
841 PMD_INIT_FUNC_TRACE();
843 fm10k_MACVLAN_remove_all(dev);
845 /* Stop mailbox service first */
846 fm10k_close_mbx_service(hw);
848 fm10k_dev_queue_release(dev);
853 fm10k_link_update(struct rte_eth_dev *dev,
854 __rte_unused int wait_to_complete)
856 PMD_INIT_FUNC_TRACE();
858 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
859 * x8 PCIe interface. For now, we leave the speed undefined since there
860 * is no 50Gbps Ethernet. */
861 dev->data->dev_link.link_speed = 0;
862 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
863 dev->data->dev_link.link_status = 1;
869 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
871 uint64_t ipackets, opackets, ibytes, obytes;
872 struct fm10k_hw *hw =
873 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
874 struct fm10k_hw_stats *hw_stats =
875 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
878 PMD_INIT_FUNC_TRACE();
880 fm10k_update_hw_stats(hw, hw_stats);
882 ipackets = opackets = ibytes = obytes = 0;
883 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
884 (i < hw->mac.max_queues); ++i) {
885 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
886 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
887 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
888 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
889 ipackets += stats->q_ipackets[i];
890 opackets += stats->q_opackets[i];
891 ibytes += stats->q_ibytes[i];
892 obytes += stats->q_obytes[i];
894 stats->ipackets = ipackets;
895 stats->opackets = opackets;
896 stats->ibytes = ibytes;
897 stats->obytes = obytes;
901 fm10k_stats_reset(struct rte_eth_dev *dev)
903 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
904 struct fm10k_hw_stats *hw_stats =
905 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
907 PMD_INIT_FUNC_TRACE();
909 memset(hw_stats, 0, sizeof(*hw_stats));
910 fm10k_rebind_hw_stats(hw, hw_stats);
914 fm10k_dev_infos_get(struct rte_eth_dev *dev,
915 struct rte_eth_dev_info *dev_info)
917 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
919 PMD_INIT_FUNC_TRACE();
921 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
922 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
923 dev_info->max_rx_queues = hw->mac.max_queues;
924 dev_info->max_tx_queues = hw->mac.max_queues;
925 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
926 dev_info->max_hash_mac_addrs = 0;
927 dev_info->max_vfs = dev->pci_dev->max_vfs;
928 dev_info->max_vmdq_pools = ETH_64_POOLS;
929 dev_info->rx_offload_capa =
930 DEV_RX_OFFLOAD_VLAN_STRIP |
931 DEV_RX_OFFLOAD_IPV4_CKSUM |
932 DEV_RX_OFFLOAD_UDP_CKSUM |
933 DEV_RX_OFFLOAD_TCP_CKSUM;
934 dev_info->tx_offload_capa =
935 DEV_TX_OFFLOAD_VLAN_INSERT;
936 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
937 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
939 dev_info->default_rxconf = (struct rte_eth_rxconf) {
941 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
942 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
943 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
945 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
949 dev_info->default_txconf = (struct rte_eth_txconf) {
951 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
952 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
953 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
955 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
956 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
957 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
958 ETH_TXQ_FLAGS_NOOFFLOADS,
964 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
967 uint16_t mac_num = 0;
968 uint32_t vid_idx, vid_bit, mac_index;
970 struct fm10k_macvlan_filter_info *macvlan;
971 struct rte_eth_dev_data *data = dev->data;
973 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
974 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
976 if (vlan_id > ETH_VLAN_ID_MAX) {
977 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
981 vid_idx = FM10K_VFTA_IDX(vlan_id);
982 vid_bit = FM10K_VFTA_BIT(vlan_id);
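/* Index sketch (assuming the usual one-bit-per-VLAN, 32-bit-word VFTA layout
 * behind these macros): vlan_id 100 gives vid_idx = 100 / 32 = 3 and
 * vid_bit = 1 << (100 % 32) = 1 << 4.
 */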
983 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
984 if (on && (macvlan->vfta[vid_idx] & vid_bit))
986 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
987 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
988 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
989 "in the VLAN filter table");
994 result = fm10k_update_vlan(hw, vlan_id, 0, on);
995 fm10k_mbx_unlock(hw);
996 if (result != FM10K_SUCCESS) {
997 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1001 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1002 (result == FM10K_SUCCESS); mac_index++) {
1003 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1005 if (mac_num > macvlan->mac_num - 1) {
1006 PMD_INIT_LOG(ERR, "MAC address number "
1011 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1012 data->mac_addrs[mac_index].addr_bytes,
1014 fm10k_mbx_unlock(hw);
1017 if (result != FM10K_SUCCESS) {
1018 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1023 macvlan->vlan_num++;
1024 macvlan->vfta[vid_idx] |= vid_bit;
1026 macvlan->vlan_num--;
1027 macvlan->vfta[vid_idx] &= ~vid_bit;
1033 fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
1035 if (mask & ETH_VLAN_STRIP_MASK) {
1036 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1037 PMD_INIT_LOG(ERR, "VLAN stripping is "
1038 "always on in fm10k");
1041 if (mask & ETH_VLAN_EXTEND_MASK) {
1042 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1043 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1044 "supported in fm10k");
1047 if (mask & ETH_VLAN_FILTER_MASK) {
1048 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1049 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1053 /* Add/Remove a MAC address, and update filters */
1055 fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add)
1058 struct fm10k_hw *hw;
1059 struct fm10k_macvlan_filter_info *macvlan;
1061 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1062 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1065 for (j = 0; j < FM10K_VFTA_SIZE; j++) {
1066 if (macvlan->vfta[j]) {
1067 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1068 if (macvlan->vfta[j] & (1 << k)) {
1069 if (i + 1 > macvlan->vlan_num) {
1070 PMD_INIT_LOG(ERR, "vlan number "
1075 fm10k_update_uc_addr(hw,
1076 hw->mac.dglort_map, mac,
1077 j * FM10K_UINT32_BIT_SIZE + k,
1079 fm10k_mbx_unlock(hw);
1092 /* Add a MAC address, and update filters */
1094 fm10k_macaddr_add(struct rte_eth_dev *dev,
1095 struct ether_addr *mac_addr,
1096 __rte_unused uint32_t index,
1097 __rte_unused uint32_t pool)
1099 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE);
1102 /* Remove a MAC address, and update filters */
1104 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1106 struct rte_eth_dev_data *data = dev->data;
1108 if (index < FM10K_MAX_MACADDR_NUM)
1109 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1113 /* Remove all VLAN and MAC address table entries */
1115 fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev)
1118 struct fm10k_macvlan_filter_info *macvlan;
1120 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1121 for (j = 0; j < FM10K_VFTA_SIZE; j++) {
1122 if (macvlan->vfta[j]) {
1123 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1124 if (macvlan->vfta[j] & (1 << k))
1125 fm10k_vlan_filter_set(dev,
1126 j * FM10K_UINT32_BIT_SIZE + k, false);
1133 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1135 if ((request < min) || (request > max) || ((request % mult) != 0))
1142 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
1143 * the physical address is required. If the memzone is already created, then
1144 * this function returns a pointer to the existing memzone.
1146 static inline const struct rte_memzone *
1147 allocate_hw_ring(const char *driver_name, const char *ring_name,
1148 uint8_t port_id, uint16_t queue_id, int socket_id,
1149 uint32_t size, uint32_t align)
1151 char name[RTE_MEMZONE_NAMESIZE];
1152 const struct rte_memzone *mz;
1154 snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
1155 driver_name, ring_name, port_id, queue_id, socket_id);
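/* Name sketch: for this PMD the arguments typically produce something like
 * "rte_pmd_fm10k_rx_ring_0_0_0" (driver name, ring name, port, queue, socket).
 */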
1157 /* return the memzone if it already exists */
1158 mz = rte_memzone_lookup(name);
1162 #ifdef RTE_LIBRTE_XEN_DOM0
1163 return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
1166 return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
1171 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1173 if ((request < min) || (request > max) || ((div % request) != 0))
1180 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1182 uint16_t rx_free_thresh;
1184 if (conf->rx_free_thresh == 0)
1185 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1187 rx_free_thresh = conf->rx_free_thresh;
1189 /* make sure the requested threshold satisfies the constraints */
1190 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1191 FM10K_RX_FREE_THRESH_MAX(q),
1192 FM10K_RX_FREE_THRESH_DIV(q),
1194 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1195 "less than or equal to %u, "
1196 "greater than or equal to %u, "
1197 "and a divisor of %u",
1198 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1199 FM10K_RX_FREE_THRESH_MIN(q),
1200 FM10K_RX_FREE_THRESH_DIV(q));
1204 q->alloc_thresh = rx_free_thresh;
1205 q->drop_en = conf->rx_drop_en;
1206 q->rx_deferred_start = conf->rx_deferred_start;
1212 * Hardware requires specific alignment for Rx packet buffers. At
1213 * least one of the following two conditions must be satisfied.
1214 * 1. Address is 512B aligned
1215 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1217 * As such, the driver may need to adjust the DMA address within the
1218 * buffer by up to 512B.
1220 * return 1 if the element size is valid, otherwise return 0.
1223 mempool_element_size_valid(struct rte_mempool *mp)
1227 /* elt_size includes mbuf header and headroom */
1228 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1229 RTE_PKTMBUF_HEADROOM;
1231 /* account for up to 512B of alignment */
1232 min_size -= FM10K_RX_DATABUF_ALIGN;
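/* Rationale sketch (assuming min_size is an unsigned type, as the overflow
 * check below implies): an element too small for the mbuf header, headroom
 * and 512B alignment wraps around to a large value, and comparing against
 * elt_size catches that case.
 */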
1234 /* sanity check for overflow */
1235 if (min_size > mp->elt_size)
1243 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1244 uint16_t nb_desc, unsigned int socket_id,
1245 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1247 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1248 struct fm10k_rx_queue *q;
1249 const struct rte_memzone *mz;
1251 PMD_INIT_FUNC_TRACE();
1253 /* make sure the mempool element size can account for alignment. */
1254 if (!mempool_element_size_valid(mp)) {
1255 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1259 /* make sure a valid number of descriptors have been requested */
1260 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1261 FM10K_MULT_RX_DESC, nb_desc)) {
1262 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1263 "less than or equal to %"PRIu32", "
1264 "greater than or equal to %u, "
1265 "and a multiple of %u",
1266 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1267 FM10K_MULT_RX_DESC);
1272 * if this queue existed already, free the associated memory. The
1273 * queue cannot be reused because memory may need to be allocated on
1274 * a different socket than was previously used.
1276 if (dev->data->rx_queues[queue_id] != NULL) {
1277 rx_queue_free(dev->data->rx_queues[queue_id]);
1278 dev->data->rx_queues[queue_id] = NULL;
1281 /* allocate memory for the queue structure */
1282 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1285 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1291 q->nb_desc = nb_desc;
1292 q->port_id = dev->data->port_id;
1293 q->queue_id = queue_id;
1294 q->tail_ptr = (volatile uint32_t *)
1295 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1296 if (handle_rxconf(q, conf))
1299 /* allocate memory for the software ring */
1300 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1301 nb_desc * sizeof(struct rte_mbuf *),
1302 RTE_CACHE_LINE_SIZE, socket_id);
1303 if (q->sw_ring == NULL) {
1304 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1310 * allocate memory for the hardware descriptor ring. A memzone large
1311 * enough to hold the maximum ring size is requested to allow for
1312 * resizing in later calls to the queue setup function.
1314 mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
1315 dev->data->port_id, queue_id, socket_id,
1316 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
1318 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1319 rte_free(q->sw_ring);
1323 q->hw_ring = mz->addr;
1324 #ifdef RTE_LIBRTE_XEN_DOM0
1325 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1327 q->hw_ring_phys_addr = mz->phys_addr;
1330 dev->data->rx_queues[queue_id] = q;
1335 fm10k_rx_queue_release(void *queue)
1337 PMD_INIT_FUNC_TRACE();
1339 rx_queue_free(queue);
1343 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1345 uint16_t tx_free_thresh;
1346 uint16_t tx_rs_thresh;
1348 /* the constraint macros require that tx_free_thresh is configured
1349 * before tx_rs_thresh */
1350 if (conf->tx_free_thresh == 0)
1351 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1353 tx_free_thresh = conf->tx_free_thresh;
1355 /* make sure the requested threshold satisfies the constraints */
1356 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1357 FM10K_TX_FREE_THRESH_MAX(q),
1358 FM10K_TX_FREE_THRESH_DIV(q),
1360 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1361 "less than or equal to %u, "
1362 "greater than or equal to %u, "
1363 "and a divisor of %u",
1364 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1365 FM10K_TX_FREE_THRESH_MIN(q),
1366 FM10K_TX_FREE_THRESH_DIV(q));
1370 q->free_thresh = tx_free_thresh;
1372 if (conf->tx_rs_thresh == 0)
1373 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1375 tx_rs_thresh = conf->tx_rs_thresh;
1377 q->tx_deferred_start = conf->tx_deferred_start;
1379 /* make sure the requested threshold satisfies the constraints */
1380 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1381 FM10K_TX_RS_THRESH_MAX(q),
1382 FM10K_TX_RS_THRESH_DIV(q),
1384 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1385 "less than or equal to %u, "
1386 "greater than or equal to %u, "
1387 "and a divisor of %u",
1388 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1389 FM10K_TX_RS_THRESH_MIN(q),
1390 FM10K_TX_RS_THRESH_DIV(q));
1394 q->rs_thresh = tx_rs_thresh;
1400 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1401 uint16_t nb_desc, unsigned int socket_id,
1402 const struct rte_eth_txconf *conf)
1404 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1405 struct fm10k_tx_queue *q;
1406 const struct rte_memzone *mz;
1408 PMD_INIT_FUNC_TRACE();
1410 /* make sure a valid number of descriptors have been requested */
1411 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1412 FM10K_MULT_TX_DESC, nb_desc)) {
1413 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1414 "less than or equal to %"PRIu32", "
1415 "greater than or equal to %u, "
1416 "and a multiple of %u",
1417 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1418 FM10K_MULT_TX_DESC);
1423 * if this queue existed already, free the associated memory. The
1424 * queue cannot be reused because memory may need to be allocated on
1425 * a different socket than was previously used.
1427 if (dev->data->tx_queues[queue_id] != NULL) {
1428 tx_queue_free(dev->data->tx_queues[queue_id]);
1429 dev->data->tx_queues[queue_id] = NULL;
1432 /* allocate memory for the queue structure */
1433 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1436 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1441 q->nb_desc = nb_desc;
1442 q->port_id = dev->data->port_id;
1443 q->queue_id = queue_id;
1444 q->tail_ptr = (volatile uint32_t *)
1445 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1446 if (handle_txconf(q, conf))
1449 /* allocate memory for the software ring */
1450 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1451 nb_desc * sizeof(struct rte_mbuf *),
1452 RTE_CACHE_LINE_SIZE, socket_id);
1453 if (q->sw_ring == NULL) {
1454 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1460 * allocate memory for the hardware descriptor ring. A memzone large
1461 * enough to hold the maximum ring size is requested to allow for
1462 * resizing in later calls to the queue setup function.
1464 mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1465 dev->data->port_id, queue_id, socket_id,
1466 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
1468 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1469 rte_free(q->sw_ring);
1473 q->hw_ring = mz->addr;
1474 #ifdef RTE_LIBRTE_XEN_DOM0
1475 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1477 q->hw_ring_phys_addr = mz->phys_addr;
1481 * allocate memory for the RS bit tracker. Enough slots to hold the
1482 * descriptor index for each RS bit needing to be set are required.
1484 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1485 ((nb_desc + 1) / q->rs_thresh) *
1487 RTE_CACHE_LINE_SIZE, socket_id);
1488 if (q->rs_tracker.list == NULL) {
1489 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1490 rte_free(q->sw_ring);
1495 dev->data->tx_queues[queue_id] = q;
1500 fm10k_tx_queue_release(void *queue)
1502 PMD_INIT_FUNC_TRACE();
1504 tx_queue_free(queue);
1508 fm10k_reta_update(struct rte_eth_dev *dev,
1509 struct rte_eth_rss_reta_entry64 *reta_conf,
1512 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1513 uint16_t i, j, idx, shift;
1517 PMD_INIT_FUNC_TRACE();
1519 if (reta_size > FM10K_MAX_RSS_INDICES) {
1520 PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
1521 "(%d) doesn't match the number the hardware can support "
1522 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1527 * Update Redirection Table RETA[n], n=0..31. The redirection table has
1528 * 128 entries in 32 registers
1530 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1531 idx = i / RTE_RETA_GROUP_SIZE;
1532 shift = i % RTE_RETA_GROUP_SIZE;
1533 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1534 BIT_MASK_PER_UINT32);
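/* Index sketch: e.g. i = 68 -> idx = 68 / 64 = 1, shift = 68 % 64 = 4, so the
 * 4-bit mask comes from bits 4..7 of reta_conf[1].mask, one bit per RETA
 * entry in this group of four.
 */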
1539 if (mask != BIT_MASK_PER_UINT32)
1540 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1542 for (j = 0; j < CHARS_PER_UINT32; j++) {
1543 if (mask & (0x1 << j)) {
1545 reta &= ~(UINT8_MAX << CHAR_BIT * j);
1546 reta |= reta_conf[idx].reta[shift + j] <<
1550 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
1557 fm10k_reta_query(struct rte_eth_dev *dev,
1558 struct rte_eth_rss_reta_entry64 *reta_conf,
1561 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1562 uint16_t i, j, idx, shift;
1566 PMD_INIT_FUNC_TRACE();
1568 if (reta_size < FM10K_MAX_RSS_INDICES) {
1569 PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
1570 "(%d) doesn't match the number the hardware can support "
1571 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1576 * Read Redirection Table RETA[n], n=0..31. The redirection table has
1577 * 128 entries in 32 registers
1579 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1580 idx = i / RTE_RETA_GROUP_SIZE;
1581 shift = i % RTE_RETA_GROUP_SIZE;
1582 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1583 BIT_MASK_PER_UINT32);
1587 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1588 for (j = 0; j < CHARS_PER_UINT32; j++) {
1589 if (mask & (0x1 << j))
1590 reta_conf[idx].reta[shift + j] = ((reta >>
1591 CHAR_BIT * j) & UINT8_MAX);
1599 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1600 struct rte_eth_rss_conf *rss_conf)
1602 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1603 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1605 uint64_t hf = rss_conf->rss_hf;
1608 PMD_INIT_FUNC_TRACE();
1610 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1611 FM10K_RSSRK_ENTRIES_PER_REG)
1618 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
1619 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
1620 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
1621 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
1622 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
1623 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
1624 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
1625 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
1626 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
1628 /* If the mapping doesn't match any supported hash type, return */
1633 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1634 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1636 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
1642 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1643 struct rte_eth_rss_conf *rss_conf)
1645 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1646 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1651 PMD_INIT_FUNC_TRACE();
1653 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1654 FM10K_RSSRK_ENTRIES_PER_REG)
1658 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1659 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1661 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
1663 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
1664 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
1665 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
1666 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
1667 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
1668 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
1669 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
1670 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
1671 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
1673 rss_conf->rss_hf = hf;
1679 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
1681 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1682 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1684 /* Bind all local non-queue interrupts to vector 0 */
1687 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1688 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1689 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1690 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1691 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1692 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1694 /* Enable misc causes */
1695 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
1696 FM10K_EIMR_ENABLE(THI_FAULT) |
1697 FM10K_EIMR_ENABLE(FUM_FAULT) |
1698 FM10K_EIMR_ENABLE(MAILBOX) |
1699 FM10K_EIMR_ENABLE(SWITCHREADY) |
1700 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
1701 FM10K_EIMR_ENABLE(SRAMERROR) |
1702 FM10K_EIMR_ENABLE(VFLR));
1705 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1706 FM10K_ITR_MASK_CLEAR);
1707 FM10K_WRITE_FLUSH(hw);
1711 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
1713 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1714 uint32_t int_map = FM10K_INT_MAP_DISABLE;
1718 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1719 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1720 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1721 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1722 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1723 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1725 /* Disable misc causes */
1726 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
1727 FM10K_EIMR_DISABLE(THI_FAULT) |
1728 FM10K_EIMR_DISABLE(FUM_FAULT) |
1729 FM10K_EIMR_DISABLE(MAILBOX) |
1730 FM10K_EIMR_DISABLE(SWITCHREADY) |
1731 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
1732 FM10K_EIMR_DISABLE(SRAMERROR) |
1733 FM10K_EIMR_DISABLE(VFLR));
1736 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
1737 FM10K_WRITE_FLUSH(hw);
1741 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
1743 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1744 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1746 /* Bind all local non-queue interrupts to vector 0 */
1749 /* Only INT 0 available, other 15 are reserved. */
1750 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
1753 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1754 FM10K_ITR_MASK_CLEAR);
1755 FM10K_WRITE_FLUSH(hw);
1759 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
1761 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1762 uint32_t int_map = FM10K_INT_MAP_DISABLE;
1766 /* Only INT 0 available, other 15 are reserved. */
1767 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
1770 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
1771 FM10K_WRITE_FLUSH(hw);
1775 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
1777 struct fm10k_fault fault;
1779 const char *estr = "Unknown error";
1781 /* Process PCA fault */
1782 if (eicr & FM10K_EICR_PCA_FAULT) {
1783 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
1786 switch (fault.type) {
1788 estr = "PCA_NO_FAULT"; break;
1789 case PCA_UNMAPPED_ADDR:
1790 estr = "PCA_UNMAPPED_ADDR"; break;
1791 case PCA_BAD_QACCESS_PF:
1792 estr = "PCA_BAD_QACCESS_PF"; break;
1793 case PCA_BAD_QACCESS_VF:
1794 estr = "PCA_BAD_QACCESS_VF"; break;
1795 case PCA_MALICIOUS_REQ:
1796 estr = "PCA_MALICIOUS_REQ"; break;
1797 case PCA_POISONED_TLP:
1798 estr = "PCA_POISONED_TLP"; break;
1800 estr = "PCA_TLP_ABORT"; break;
1804 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1805 estr, fault.func ? "VF" : "PF", fault.func,
1806 fault.address, fault.specinfo);
1809 /* Process THI fault */
1810 if (eicr & FM10K_EICR_THI_FAULT) {
1811 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
1814 switch (fault.type) {
1816 estr = "THI_NO_FAULT"; break;
1817 case THI_MAL_DIS_Q_FAULT:
1818 estr = "THI_MAL_DIS_Q_FAULT"; break;
1822 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1823 estr, fault.func ? "VF" : "PF", fault.func,
1824 fault.address, fault.specinfo);
1827 /* Process FUM fault */
1828 if (eicr & FM10K_EICR_FUM_FAULT) {
1829 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
1832 switch (fault.type) {
1834 estr = "FUM_NO_FAULT"; break;
1835 case FUM_UNMAPPED_ADDR:
1836 estr = "FUM_UNMAPPED_ADDR"; break;
1837 case FUM_POISONED_TLP:
1838 estr = "FUM_POISONED_TLP"; break;
1839 case FUM_BAD_VF_QACCESS:
1840 estr = "FUM_BAD_VF_QACCESS"; break;
1841 case FUM_ADD_DECODE_ERR:
1842 estr = "FUM_ADD_DECODE_ERR"; break;
1844 estr = "FUM_RO_ERROR"; break;
1845 case FUM_QPRC_CRC_ERROR:
1846 estr = "FUM_QPRC_CRC_ERROR"; break;
1847 case FUM_CSR_TIMEOUT:
1848 estr = "FUM_CSR_TIMEOUT"; break;
1849 case FUM_INVALID_TYPE:
1850 estr = "FUM_INVALID_TYPE"; break;
1851 case FUM_INVALID_LENGTH:
1852 estr = "FUM_INVALID_LENGTH"; break;
1853 case FUM_INVALID_BE:
1854 estr = "FUM_INVALID_BE"; break;
1855 case FUM_INVALID_ALIGN:
1856 estr = "FUM_INVALID_ALIGN"; break;
1860 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1861 estr, fault.func ? "VF" : "PF", fault.func,
1862 fault.address, fault.specinfo);
1867 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
1872 * PF interrupt handler triggered by NIC for handling specific interrupt.
1875 * Pointer to interrupt handle.
1877 * The address of parameter (struct rte_eth_dev *) registered before.
1883 fm10k_dev_interrupt_handler_pf(
1884 __rte_unused struct rte_intr_handle *handle,
1887 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1888 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1889 uint32_t cause, status;
1891 if (hw->mac.type != fm10k_mac_pf)
1894 cause = FM10K_READ_REG(hw, FM10K_EICR);
1896 /* Handle PCI fault cases */
1897 if (cause & FM10K_EICR_FAULT_MASK) {
1898 PMD_INIT_LOG(ERR, "INT: fault detected!");
1899 fm10k_dev_handle_fault(hw, cause);
1902 /* Handle switch up/down */
1903 if (cause & FM10K_EICR_SWITCHNOTREADY)
1904 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
1906 if (cause & FM10K_EICR_SWITCHREADY)
1907 PMD_INIT_LOG(INFO, "INT: Switch is ready");
1909 /* Handle mailbox message */
1911 hw->mbx.ops.process(hw, &hw->mbx);
1912 fm10k_mbx_unlock(hw);
1914 /* Handle SRAM error */
1915 if (cause & FM10K_EICR_SRAMERROR) {
1916 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
1918 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
1919 /* Write to clear pending bits */
1920 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
1922 /* Todo: print out error message after shared code updates */
1925 /* Clear these 3 events, if any are pending */
1926 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
1927 FM10K_EICR_SWITCHREADY;
1929 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
1931 /* Re-enable interrupt from device side */
1932 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1933 FM10K_ITR_MASK_CLEAR);
1934 /* Re-enable interrupt from host side */
1935 rte_intr_enable(&(dev->pci_dev->intr_handle));
1939 * VF interrupt handler triggered by NIC for handling specific interrupt.
1942 * Pointer to interrupt handle.
1944 * The address of parameter (struct rte_eth_dev *) registered before.
1950 fm10k_dev_interrupt_handler_vf(
1951 __rte_unused struct rte_intr_handle *handle,
1954 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1955 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1957 if (hw->mac.type != fm10k_mac_vf)
1960 /* Handle mailbox message if lock is acquired */
1962 hw->mbx.ops.process(hw, &hw->mbx);
1963 fm10k_mbx_unlock(hw);
1965 /* Re-enable interrupt from device side */
1966 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1967 FM10K_ITR_MASK_CLEAR);
1968 /* Re-enable interrupt from host side */
1969 rte_intr_enable(&(dev->pci_dev->intr_handle));
1972 /* Mailbox message handler in VF */
1973 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
1974 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1975 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
1976 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
1977 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1980 /* Mailbox message handler in PF */
1981 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
1982 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1983 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1984 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1985 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1986 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1987 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1988 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1992 fm10k_setup_mbx_service(struct fm10k_hw *hw)
1996 /* Initialize mailbox lock */
1997 fm10k_mbx_initlock(hw);
1999 /* Replace default message handler with new ones */
2000 if (hw->mac.type == fm10k_mac_pf)
2001 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
2003 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2006 PMD_INIT_LOG(ERR, "Failed to register mailbox handler. err: %d",
2010 /* Connect to SM for PF device or PF for VF device */
2011 return hw->mbx.ops.connect(hw, &hw->mbx);
2015 fm10k_close_mbx_service(struct fm10k_hw *hw)
2017 /* Disconnect from SM for PF device or PF for VF device */
2018 hw->mbx.ops.disconnect(hw, &hw->mbx);
2021 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2022 .dev_configure = fm10k_dev_configure,
2023 .dev_start = fm10k_dev_start,
2024 .dev_stop = fm10k_dev_stop,
2025 .dev_close = fm10k_dev_close,
2026 .promiscuous_enable = fm10k_dev_promiscuous_enable,
2027 .promiscuous_disable = fm10k_dev_promiscuous_disable,
2028 .allmulticast_enable = fm10k_dev_allmulticast_enable,
2029 .allmulticast_disable = fm10k_dev_allmulticast_disable,
2030 .stats_get = fm10k_stats_get,
2031 .stats_reset = fm10k_stats_reset,
2032 .link_update = fm10k_link_update,
2033 .dev_infos_get = fm10k_dev_infos_get,
2034 .vlan_filter_set = fm10k_vlan_filter_set,
2035 .vlan_offload_set = fm10k_vlan_offload_set,
2036 .mac_addr_add = fm10k_macaddr_add,
2037 .mac_addr_remove = fm10k_macaddr_remove,
2038 .rx_queue_start = fm10k_dev_rx_queue_start,
2039 .rx_queue_stop = fm10k_dev_rx_queue_stop,
2040 .tx_queue_start = fm10k_dev_tx_queue_start,
2041 .tx_queue_stop = fm10k_dev_tx_queue_stop,
2042 .rx_queue_setup = fm10k_rx_queue_setup,
2043 .rx_queue_release = fm10k_rx_queue_release,
2044 .tx_queue_setup = fm10k_tx_queue_setup,
2045 .tx_queue_release = fm10k_tx_queue_release,
2046 .reta_update = fm10k_reta_update,
2047 .reta_query = fm10k_reta_query,
2048 .rss_hash_update = fm10k_rss_hash_update,
2049 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
2053 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2055 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2057 struct fm10k_macvlan_filter_info *macvlan;
2059 PMD_INIT_FUNC_TRACE();
2061 dev->dev_ops = &fm10k_eth_dev_ops;
2062 dev->rx_pkt_burst = &fm10k_recv_pkts;
2063 dev->tx_pkt_burst = &fm10k_xmit_pkts;
2065 if (dev->data->scattered_rx)
2066 dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
2068 /* only initialize in the primary process */
2069 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2072 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2073 memset(macvlan, 0, sizeof(*macvlan));
2074 /* Vendor and Device ID need to be set before init of shared code */
2075 memset(hw, 0, sizeof(*hw));
2076 hw->device_id = dev->pci_dev->id.device_id;
2077 hw->vendor_id = dev->pci_dev->id.vendor_id;
2078 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2079 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2080 hw->revision_id = 0;
2081 hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2082 if (hw->hw_addr == NULL) {
2083 PMD_INIT_LOG(ERR, "Bad mem resource."
2084 " Try to blacklist unused devices.");
2088 /* Store fm10k_adapter pointer */
2089 hw->back = dev->data->dev_private;
2091 /* Initialize the shared code */
2092 diag = fm10k_init_shared_code(hw);
2093 if (diag != FM10K_SUCCESS) {
2094 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2099 * Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2100 * there is no way to get link status without reading BAR4. Until this
2101 * works, assume we have maximum bandwidth.
2102 * @todo - fix bus info
2104 hw->bus_caps.speed = fm10k_bus_speed_8000;
2105 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2106 hw->bus_caps.payload = fm10k_bus_payload_512;
2107 hw->bus.speed = fm10k_bus_speed_8000;
2108 hw->bus.width = fm10k_bus_width_pcie_x8;
2109 hw->bus.payload = fm10k_bus_payload_256;
2111 /* Initialize the hw */
2112 diag = fm10k_init_hw(hw);
2113 if (diag != FM10K_SUCCESS) {
2114 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2118 /* Initialize MAC address(es) */
2119 dev->data->mac_addrs = rte_zmalloc("fm10k",
2120 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2121 if (dev->data->mac_addrs == NULL) {
2122 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2126 diag = fm10k_read_mac_addr(hw);
2128 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2129 &dev->data->mac_addrs[0]);
2131 if (diag != FM10K_SUCCESS ||
2132 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2134 /* Generate a random addr */
2135 eth_random_addr(hw->mac.addr);
2136 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
2137 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2138 &dev->data->mac_addrs[0]);
2141 /* Reset the hw statistics */
2142 fm10k_stats_reset(dev);
2145 diag = fm10k_reset_hw(hw);
2146 if (diag != FM10K_SUCCESS) {
2147 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2151 /* Setup mailbox service */
2152 diag = fm10k_setup_mbx_service(hw);
2153 if (diag != FM10K_SUCCESS) {
2154 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2158 /* PF and VF have different interrupt handling mechanisms */
2159 if (hw->mac.type == fm10k_mac_pf) {
2160 /* register callback func to eal lib */
2161 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2162 fm10k_dev_interrupt_handler_pf, (void *)dev);
2164 /* enable MISC interrupt */
2165 fm10k_dev_enable_intr_pf(dev);
2167 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2168 fm10k_dev_interrupt_handler_vf, (void *)dev);
2170 fm10k_dev_enable_intr_vf(dev);
2173 /* Enable uio intr after callback registered */
2174 rte_intr_enable(&(dev->pci_dev->intr_handle));
2176 hw->mac.ops.update_int_moderator(hw);
2178 /* Make sure Switch Manager is ready before going forward. */
2179 if (hw->mac.type == fm10k_mac_pf) {
2180 int switch_ready = 0;
2183 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2185 hw->mac.ops.get_host_state(hw, &switch_ready);
2186 fm10k_mbx_unlock(hw);
2189 /* Delay some time to acquire async LPORT_MAP info. */
2190 rte_delay_us(WAIT_SWITCH_MSG_US);
2193 if (switch_ready == 0) {
2194 PMD_INIT_LOG(ERR, "switch is not ready");
2200 * The calls below trigger mailbox operations; acquire the lock to avoid
2201 * a race with the interrupt handler. Operations on the mailbox FIFO
2202 * raise an interrupt to the PF/SM, whose handler responds with an
2203 * interrupt back to our side, which in turn touches the mailbox FIFO
2204 * again.
2207 /* Enable port first */
2208 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2210 /* Set unicast mode by default. App can change to other mode in other
2213 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2214 FM10K_XCAST_MODE_NONE);
2216 fm10k_mbx_unlock(hw);
2218 /* Add default mac address */
2219 fm10k_MAC_filter_set(dev, hw->mac.addr, true);
2225 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
2227 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2229 PMD_INIT_FUNC_TRACE();
2231 /* only uninitialize in the primary process */
2232 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2235 /* safe to close dev here */
2236 fm10k_dev_close(dev);
2238 dev->dev_ops = NULL;
2239 dev->rx_pkt_burst = NULL;
2240 dev->tx_pkt_burst = NULL;
2242 /* disable uio/vfio intr */
2243 rte_intr_disable(&(dev->pci_dev->intr_handle));
2245 /* PF and VF have different interrupt handling mechanisms */
2246 if (hw->mac.type == fm10k_mac_pf) {
2247 /* disable interrupt */
2248 fm10k_dev_disable_intr_pf(dev);
2250 /* unregister callback func to eal lib */
2251 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2252 fm10k_dev_interrupt_handler_pf, (void *)dev);
2254 /* disable interrupt */
2255 fm10k_dev_disable_intr_vf(dev);
2257 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2258 fm10k_dev_interrupt_handler_vf, (void *)dev);
2261 /* free mac memory */
2262 if (dev->data->mac_addrs) {
2263 rte_free(dev->data->mac_addrs);
2264 dev->data->mac_addrs = NULL;
2267 memset(hw, 0, sizeof(*hw));
2273 * The set of PCI devices this driver supports. This driver will enable both PF
2274 * and SRIOV-VF devices.
2276 static const struct rte_pci_id pci_id_fm10k_map[] = {
2277 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2278 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2279 #include "rte_pci_dev_ids.h"
2280 { .vendor_id = 0, /* sentinel */ },
2283 static struct eth_driver rte_pmd_fm10k = {
2285 .name = "rte_pmd_fm10k",
2286 .id_table = pci_id_fm10k_map,
2287 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
2289 .eth_dev_init = eth_fm10k_dev_init,
2290 .eth_dev_uninit = eth_fm10k_dev_uninit,
2291 .dev_private_size = sizeof(struct fm10k_adapter),
2295 * Driver initialization routine.
2296 * Invoked once at EAL init time.
2297 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
2300 rte_pmd_fm10k_init(__rte_unused const char *name,
2301 __rte_unused const char *params)
2303 PMD_INIT_FUNC_TRACE();
2304 rte_eth_driver_register(&rte_pmd_fm10k);
2308 static struct rte_driver rte_fm10k_driver = {
2310 .init = rte_pmd_fm10k_init,
2313 PMD_REGISTER_DRIVER(rte_fm10k_driver);