4 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_memzone.h>
37 #include <rte_string_fns.h>
39 #include <rte_spinlock.h>
42 #include "base/fm10k_api.h"
44 /* Default delay to acquire mailbox lock */
45 #define FM10K_MBXLOCK_DELAY_US 20
46 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
48 #define MAIN_VSI_POOL_NUMBER 0
50 /* Max try times to acquire switch status */
51 #define MAX_QUERY_SWITCH_STATE_TIMES 10
52 /* Wait interval to get switch status */
53 #define WAIT_SWITCH_MSG_US 100000
54 /* Number of bytes (chars) per uint32 type */
55 #define CHARS_PER_UINT32 (sizeof(uint32_t))
56 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
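/*
 * Added note: with a 4-byte uint32_t (true on all supported targets),
 * CHARS_PER_UINT32 evaluates to 4 and BIT_MASK_PER_UINT32 to
 * (1 << 4) - 1 == 0xF, i.e. one mask bit per byte lane of a 32-bit RETA
 * register; fm10k_reta_update()/fm10k_reta_query() below rely on this.
 */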
58 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
59 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
60 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
61 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
62 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
63 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
64 static int
65 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
66 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
67 const u8 *mac, bool add, uint32_t pool);
68 static void fm10k_tx_queue_release(void *queue);
69 static void fm10k_rx_queue_release(void *queue);
72 fm10k_mbx_initlock(struct fm10k_hw *hw)
74 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
78 fm10k_mbx_lock(struct fm10k_hw *hw)
80 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
81 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
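/*
 * Descriptive note: the lock is taken with a bounded busy-wait (trylock
 * plus a 20us delay between attempts) rather than a blocking lock, and
 * every mailbox operation in this file is bracketed the same way:
 *
 *	fm10k_mbx_lock(hw);
 *	hw->mbx.ops.process(hw, &hw->mbx);
 *	fm10k_mbx_unlock(hw);
 */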
85 fm10k_mbx_unlock(struct fm10k_hw *hw)
87 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
91 * reset queue to initial state, allocate software buffers used when starting
94 * return -ENOMEM if buffers cannot be allocated
95 * return -EINVAL if buffers do not satisfy alignment condition
98 rx_queue_reset(struct fm10k_rx_queue *q)
102 PMD_INIT_FUNC_TRACE();
104 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
108 for (i = 0; i < q->nb_desc; ++i) {
109 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
110 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
111 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
115 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
116 q->hw_ring[i].q.pkt_addr = dma_addr;
117 q->hw_ring[i].q.hdr_addr = dma_addr;
122 q->next_trigger = q->alloc_thresh - 1;
123 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
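/*
 * Descriptive note: writing nb_desc - 1 to the tail register hands all
 * but one descriptor to the hardware (the usual one-slot-reserved ring
 * convention), and next_trigger = alloc_thresh - 1 marks the first point
 * at which software will re-arm the ring with fresh mbufs.
 */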
128 * clean queue, descriptor rings, free software buffers used when stopping
132 rx_queue_clean(struct fm10k_rx_queue *q)
134 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
136 PMD_INIT_FUNC_TRACE();
138 /* zero descriptor rings */
139 for (i = 0; i < q->nb_desc; ++i)
140 q->hw_ring[i] = zero;
142 /* free software buffers */
143 for (i = 0; i < q->nb_desc; ++i) {
145 rte_pktmbuf_free_seg(q->sw_ring[i]);
146 q->sw_ring[i] = NULL;
152 * free all queue memory used when releasing the queue (i.e. on queue re-configure)
155 rx_queue_free(struct fm10k_rx_queue *q)
157 PMD_INIT_FUNC_TRACE();
159 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
162 rte_free(q->sw_ring);
171 * disable RX queue, wait until HW finishes the necessary flush operation
174 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
178 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
179 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
180 reg & ~FM10K_RXQCTL_ENABLE);
182 /* Wait 100us at most */
183 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
185 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
186 if (!(reg & FM10K_RXQCTL_ENABLE))
190 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
197 * reset queue to initial state, allocate software buffers used when starting
201 tx_queue_reset(struct fm10k_tx_queue *q)
203 PMD_INIT_FUNC_TRACE();
207 q->nb_free = q->nb_desc - 1;
208 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
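/*
 * Descriptive note: the RS tracker FIFO holds one slot per descriptor
 * that can carry an RS (Report Status) bit; with one RS bit every
 * rs_thresh descriptors, (nb_desc + 1) / rs_thresh slots suffice. The
 * same sizing is used when the list is allocated in
 * fm10k_tx_queue_setup().
 */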
209 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
213 * clean queue, descriptor rings, free software buffers used when stopping
217 tx_queue_clean(struct fm10k_tx_queue *q)
219 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
221 PMD_INIT_FUNC_TRACE();
223 /* zero descriptor rings */
224 for (i = 0; i < q->nb_desc; ++i)
225 q->hw_ring[i] = zero;
227 /* free software buffers */
228 for (i = 0; i < q->nb_desc; ++i) {
230 rte_pktmbuf_free_seg(q->sw_ring[i]);
231 q->sw_ring[i] = NULL;
237 * free all queue memory used when releasing the queue (i.e. on queue re-configure)
240 tx_queue_free(struct fm10k_tx_queue *q)
242 PMD_INIT_FUNC_TRACE();
244 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
246 if (q->rs_tracker.list) {
247 rte_free(q->rs_tracker.list);
248 q->rs_tracker.list = NULL;
251 rte_free(q->sw_ring);
260 * disable TX queue, wait until HW finishes the necessary flush operation
263 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
267 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
268 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
269 reg & ~FM10K_TXDCTL_ENABLE);
271 /* Wait 100us at most */
272 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
274 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
275 if (!(reg & FM10K_TXDCTL_ENABLE))
279 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
286 fm10k_check_mq_mode(struct rte_eth_dev *dev)
288 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
289 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
290 struct rte_eth_vmdq_rx_conf *vmdq_conf;
291 uint16_t nb_rx_q = dev->data->nb_rx_queues;
293 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
295 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
296 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
300 if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
303 if (hw->mac.type == fm10k_mac_vf) {
304 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
308 /* Check VMDQ queue pool number */
309 if (vmdq_conf->nb_queue_pools >
310 sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
311 vmdq_conf->nb_queue_pools > nb_rx_q) {
312 PMD_INIT_LOG(ERR, "Too many queue pools: %d",
313 vmdq_conf->nb_queue_pools);
321 fm10k_dev_configure(struct rte_eth_dev *dev)
325 PMD_INIT_FUNC_TRACE();
327 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
328 PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
329 /* multiple queue mode checking */
330 ret = fm10k_check_mq_mode(dev);
332 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
340 /* fls = find last set bit = 32 minus the number of leading zeros */
342 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
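/*
 * Illustrative examples (added note, not in the original source): fls()
 * returns the 1-based index of the most significant set bit, e.g.
 *
 *	fls(0) == 0, fls(1) == 1, fls(2) == 2, fls(4) == 3, fls(255) == 8
 *
 * so fls(n - 1) is the number of bits needed to address n entries; e.g.
 * fls(4 - 1) == 2 bits are enough to address 4 VMDQ pools, which is how
 * the DGLORT decoder fields are sized in fm10k_dev_dglort_map_configure().
 */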
346 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
348 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
349 struct rte_eth_vmdq_rx_conf *vmdq_conf;
352 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
354 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
355 if (!vmdq_conf->pool_map[i].pools)
358 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
359 fm10k_mbx_unlock(hw);
364 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
366 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
368 /* Add default mac address */
369 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
370 MAIN_VSI_POOL_NUMBER);
374 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
376 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
377 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
378 uint32_t mrqc, *key, i, reta, j;
381 #define RSS_KEY_SIZE 40
382 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
383 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
384 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
385 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
386 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
387 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
390 if (dev->data->nb_rx_queues == 1 ||
391 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
392 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
395 /* hash key is rss_intel_key (default) or user provided (rss_key) */
396 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
397 key = (uint32_t *)rss_intel_key;
399 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
401 /* Now fill our hash function seeds, 4 bytes at a time */
402 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
403 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
406 * Fill in redirection table
407 * The byte-swap is needed because NIC registers are in
408 * little-endian order.
411 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
412 if (j == dev->data->nb_rx_queues)
414 reta = (reta << CHAR_BIT) | j;
416 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
421 * Generate RSS hash based on packet types, TCP/UDP
422 * port numbers and/or IPv4/v6 src and dst addresses
424 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
426 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
427 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
428 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
429 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
430 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
431 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
432 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
433 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
434 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
437 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
442 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
446 fm10k_dev_logic_port_update(struct rte_eth_dev *dev,
447 uint16_t nb_lport_old, uint16_t nb_lport_new)
449 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
453 /* Disable previous logic ports */
455 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
456 nb_lport_old, false);
457 /* Enable new logic ports */
458 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
460 fm10k_mbx_unlock(hw);
462 for (i = 0; i < nb_lport_new; i++) {
463 /* Set unicast mode by default. Applications can switch
464 * to another mode via other API calls.
467 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
468 FM10K_XCAST_MODE_NONE);
469 fm10k_mbx_unlock(hw);
474 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
476 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
477 struct rte_eth_vmdq_rx_conf *vmdq_conf;
478 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
479 struct fm10k_macvlan_filter_info *macvlan;
480 uint16_t nb_queue_pools = 0; /* pool number in configuration */
481 uint16_t nb_lport_new, nb_lport_old;
483 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
484 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
486 fm10k_dev_rss_configure(dev);
488 /* only PF supports VMDQ */
489 if (hw->mac.type != fm10k_mac_pf)
492 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
493 nb_queue_pools = vmdq_conf->nb_queue_pools;
495 /* no pool number change, no need to update logic port and VLAN/MAC */
496 if (macvlan->nb_queue_pools == nb_queue_pools)
499 nb_lport_old = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
500 nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
501 fm10k_dev_logic_port_update(dev, nb_lport_old, nb_lport_new);
503 /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
504 memset(dev->data->mac_addrs, 0,
505 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
506 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
507 &dev->data->mac_addrs[0]);
508 memset(macvlan, 0, sizeof(*macvlan));
509 macvlan->nb_queue_pools = nb_queue_pools;
512 fm10k_dev_vmdq_rx_configure(dev);
514 fm10k_dev_pf_main_vsi_reset(dev);
518 fm10k_dev_tx_init(struct rte_eth_dev *dev)
520 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
522 struct fm10k_tx_queue *txq;
526 /* Disable TXINT to avoid possible interrupt */
527 for (i = 0; i < hw->mac.max_queues; i++)
528 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
529 3 << FM10K_TXINT_TIMER_SHIFT);
532 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
533 txq = dev->data->tx_queues[i];
534 base_addr = txq->hw_ring_phys_addr;
535 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
537 /* disable queue to avoid issues while updating state */
538 ret = tx_queue_disable(hw, i);
540 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
544 /* set location and size for descriptor ring */
545 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
546 base_addr & UINT64_LOWER_32BITS_MASK);
547 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
548 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
549 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
555 fm10k_dev_rx_init(struct rte_eth_dev *dev)
557 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
559 struct fm10k_rx_queue *rxq;
562 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
565 /* Disable RXINT to avoid possible interrupt */
566 for (i = 0; i < hw->mac.max_queues; i++)
567 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
568 3 << FM10K_RXINT_TIMER_SHIFT);
570 /* Setup RX queues */
571 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
572 rxq = dev->data->rx_queues[i];
573 base_addr = rxq->hw_ring_phys_addr;
574 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
576 /* disable queue to avoid issues while updating state */
577 ret = rx_queue_disable(hw, i);
579 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
583 /* Setup the Base and Length of the Rx Descriptor Ring */
584 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
585 base_addr & UINT64_LOWER_32BITS_MASK);
586 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
587 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
588 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
590 /* Configure the Rx buffer size for one buffer without split */
591 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
592 RTE_PKTMBUF_HEADROOM);
593 /* As RX buffer is aligned to 512B within mbuf, some bytes are
594 * reserved for this purpose, and the worst case could be 511B.
595 * But SRR reg assumes all buffers have the same size. In order
596 * to fill the gap, we'll have to consider the worst case and
597 * assume 512B is reserved. If we don't do so, it's possible
598 * for the HW to overwrite data into the next mbuf.
600 buf_size -= FM10K_RX_DATABUF_ALIGN;
602 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
603 buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
605 /* Add dual VLAN tag length to support double VLAN (QinQ) */
606 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
607 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
608 dev->data->dev_conf.rxmode.enable_scatter) {
610 dev->data->scattered_rx = 1;
611 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
612 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
613 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
614 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
617 /* Enable drop on empty; this bit is read-only (RO) for the VF */
618 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
619 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
621 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
622 FM10K_WRITE_FLUSH(hw);
625 /* Configure VMDQ/RSS if applicable */
626 fm10k_dev_mq_rx_configure(dev);
631 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
633 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
636 struct fm10k_rx_queue *rxq;
638 PMD_INIT_FUNC_TRACE();
640 if (rx_queue_id < dev->data->nb_rx_queues) {
641 rxq = dev->data->rx_queues[rx_queue_id];
642 err = rx_queue_reset(rxq);
643 if (err == -ENOMEM) {
644 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
646 } else if (err == -EINVAL) {
647 PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"
652 /* Setup the HW Rx Head and Tail Descriptor Pointers
653 * Note: this must be done AFTER the queue is enabled on real
654 * hardware, but BEFORE the queue is enabled when using the
655 * emulation platform. Do it in both places for now and remove
656 * this comment and the following two register writes when the
657 * emulation platform is no longer being used.
659 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
660 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
662 /* Set PF ownership flag for PF devices */
663 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
664 if (hw->mac.type == fm10k_mac_pf)
665 reg |= FM10K_RXQCTL_PF;
666 reg |= FM10K_RXQCTL_ENABLE;
667 /* enable RX queue */
668 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
669 FM10K_WRITE_FLUSH(hw);
671 /* Setup the HW Rx Head and Tail Descriptor Pointers
672 * Note: this must be done AFTER the queue is enabled
674 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
675 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
682 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
684 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
686 PMD_INIT_FUNC_TRACE();
688 if (rx_queue_id < dev->data->nb_rx_queues) {
689 /* Disable RX queue */
690 rx_queue_disable(hw, rx_queue_id);
692 /* Free mbuf and clean HW ring */
693 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
700 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
702 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
703 /** @todo - this should be defined in the shared code */
704 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
705 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
708 PMD_INIT_FUNC_TRACE();
710 if (tx_queue_id < dev->data->nb_tx_queues) {
711 tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
713 /* reset head and tail pointers */
714 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
715 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
717 /* enable TX queue */
718 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
719 FM10K_TXDCTL_ENABLE | txdctl);
720 FM10K_WRITE_FLUSH(hw);
728 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
730 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
732 PMD_INIT_FUNC_TRACE();
734 if (tx_queue_id < dev->data->nb_tx_queues) {
735 tx_queue_disable(hw, tx_queue_id);
736 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
742 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
744 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
745 != FM10K_DGLORTMAP_NONE);
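/*
 * Descriptive note: the switch manager assigns this port its glort
 * (global resource tag) range over the mailbox; until then dglort_map
 * still reads FM10K_DGLORTMAP_NONE. The helper above therefore guards
 * every operation that needs a valid glort, such as xcast mode, MAC/VLAN
 * filter and lport state updates.
 */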
749 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
751 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
754 PMD_INIT_FUNC_TRACE();
756 /* Return if a valid glort range has not been acquired */
757 if (!fm10k_glort_valid(hw))
761 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
762 FM10K_XCAST_MODE_PROMISC);
763 fm10k_mbx_unlock(hw);
765 if (status != FM10K_SUCCESS)
766 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
770 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
772 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
776 PMD_INIT_FUNC_TRACE();
778 /* Return if a valid glort range has not been acquired */
779 if (!fm10k_glort_valid(hw))
782 if (dev->data->all_multicast == 1)
783 mode = FM10K_XCAST_MODE_ALLMULTI;
785 mode = FM10K_XCAST_MODE_NONE;
788 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
790 fm10k_mbx_unlock(hw);
792 if (status != FM10K_SUCCESS)
793 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
797 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
799 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
802 PMD_INIT_FUNC_TRACE();
804 /* Return if a valid glort range has not been acquired */
805 if (!fm10k_glort_valid(hw))
808 /* If promiscuous mode is enabled, it doesn't make sense to enable
809 * allmulticast and disable promiscuous since fm10k can only select
812 if (dev->data->promiscuous) {
813 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
814 "no need to enable allmulticast");
819 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
820 FM10K_XCAST_MODE_ALLMULTI);
821 fm10k_mbx_unlock(hw);
823 if (status != FM10K_SUCCESS)
824 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
828 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
830 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
833 PMD_INIT_FUNC_TRACE();
835 /* Return if a valid glort range has not been acquired */
836 if (!fm10k_glort_valid(hw))
839 if (dev->data->promiscuous) {
840 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
841 "since promiscuous mode is enabled");
846 /* Change mode to unicast mode */
847 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
848 FM10K_XCAST_MODE_NONE);
849 fm10k_mbx_unlock(hw);
851 if (status != FM10K_SUCCESS)
852 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
856 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
858 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
859 uint32_t dglortdec, pool_len, rss_len, i;
860 uint16_t nb_queue_pools;
861 struct fm10k_macvlan_filter_info *macvlan;
863 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
864 nb_queue_pools = macvlan->nb_queue_pools;
865 pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
866 rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
867 dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
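/*
 * A worked example (illustrative values, added note): with 4 VMDQ pools
 * and 8 Rx queues,
 *
 *	pool_len  = fls(4 - 1) = 2              (2 bits address 4 pools)
 *	rss_len   = fls(8 - 1) - pool_len = 1   (1 bit of per-pool RSS index)
 *	dglortdec = (1 << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | 2
 *
 * packing both field widths into the single decoder register written just
 * below.
 */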
869 /* Establish only MAP 0 as valid */
870 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
872 /* Configure VMDQ/RSS DGlort Decoder */
873 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
875 /* Invalidate all other GLORT entries */
876 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
877 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
878 FM10K_DGLORTMAP_NONE);
881 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
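/*
 * Descriptive note: (1 << shift) - 1 is the standard round-up mask, so a
 * length can be rounded up to the SRRCTL buffer-size granularity with a
 * sketch like (hedged; the exact call sites may differ):
 *
 *	len = (len + BSIZEPKT_ROUNDUP) & ~BSIZEPKT_ROUNDUP;
 */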
883 fm10k_dev_start(struct rte_eth_dev *dev)
885 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
888 PMD_INIT_FUNC_TRACE();
890 /* stop, init, then start the hw */
891 diag = fm10k_stop_hw(hw);
892 if (diag != FM10K_SUCCESS) {
893 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
897 diag = fm10k_init_hw(hw);
898 if (diag != FM10K_SUCCESS) {
899 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
903 diag = fm10k_start_hw(hw);
904 if (diag != FM10K_SUCCESS) {
905 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
909 diag = fm10k_dev_tx_init(dev);
911 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
915 diag = fm10k_dev_rx_init(dev);
917 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
921 if (hw->mac.type == fm10k_mac_pf)
922 fm10k_dev_dglort_map_configure(dev);
924 for (i = 0; i < dev->data->nb_rx_queues; i++) {
925 struct fm10k_rx_queue *rxq;
926 rxq = dev->data->rx_queues[i];
928 if (rxq->rx_deferred_start)
930 diag = fm10k_dev_rx_queue_start(dev, i);
933 for (j = 0; j < i; ++j)
934 rx_queue_clean(dev->data->rx_queues[j]);
939 for (i = 0; i < dev->data->nb_tx_queues; i++) {
940 struct fm10k_tx_queue *txq;
941 txq = dev->data->tx_queues[i];
943 if (txq->tx_deferred_start)
945 diag = fm10k_dev_tx_queue_start(dev, i);
948 for (j = 0; j < i; ++j)
949 tx_queue_clean(dev->data->tx_queues[j]);
950 for (j = 0; j < dev->data->nb_rx_queues; ++j)
951 rx_queue_clean(dev->data->rx_queues[j]);
956 /* Update default vlan when not in VMDQ mode */
957 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
958 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
964 fm10k_dev_stop(struct rte_eth_dev *dev)
968 PMD_INIT_FUNC_TRACE();
970 if (dev->data->tx_queues)
971 for (i = 0; i < dev->data->nb_tx_queues; i++)
972 fm10k_dev_tx_queue_stop(dev, i);
974 if (dev->data->rx_queues)
975 for (i = 0; i < dev->data->nb_rx_queues; i++)
976 fm10k_dev_rx_queue_stop(dev, i);
980 fm10k_dev_queue_release(struct rte_eth_dev *dev)
984 PMD_INIT_FUNC_TRACE();
986 if (dev->data->tx_queues) {
987 for (i = 0; i < dev->data->nb_tx_queues; i++)
988 fm10k_tx_queue_release(dev->data->tx_queues[i]);
991 if (dev->data->rx_queues) {
992 for (i = 0; i < dev->data->nb_rx_queues; i++)
993 fm10k_rx_queue_release(dev->data->rx_queues[i]);
998 fm10k_dev_close(struct rte_eth_dev *dev)
1000 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1002 struct fm10k_macvlan_filter_info *macvlan;
1004 PMD_INIT_FUNC_TRACE();
1006 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1007 nb_lport = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
1009 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1011 fm10k_mbx_unlock(hw);
1013 /* Stop mailbox service first */
1014 fm10k_close_mbx_service(hw);
1015 fm10k_dev_stop(dev);
1016 fm10k_dev_queue_release(dev);
1021 fm10k_link_update(struct rte_eth_dev *dev,
1022 __rte_unused int wait_to_complete)
1024 PMD_INIT_FUNC_TRACE();
1026 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
1027 * x8 PCIe interface. For now, we leave the speed undefined since there
1028 * is no 50Gbps Ethernet. */
1029 dev->data->dev_link.link_speed = 0;
1030 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1031 dev->data->dev_link.link_status = 1;
1037 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1039 uint64_t ipackets, opackets, ibytes, obytes;
1040 struct fm10k_hw *hw =
1041 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1042 struct fm10k_hw_stats *hw_stats =
1043 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1046 PMD_INIT_FUNC_TRACE();
1048 fm10k_update_hw_stats(hw, hw_stats);
1050 ipackets = opackets = ibytes = obytes = 0;
1051 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1052 (i < hw->mac.max_queues); ++i) {
1053 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1054 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1055 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
1056 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
1057 ipackets += stats->q_ipackets[i];
1058 opackets += stats->q_opackets[i];
1059 ibytes += stats->q_ibytes[i];
1060 obytes += stats->q_obytes[i];
1062 stats->ipackets = ipackets;
1063 stats->opackets = opackets;
1064 stats->ibytes = ibytes;
1065 stats->obytes = obytes;
1069 fm10k_stats_reset(struct rte_eth_dev *dev)
1071 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1072 struct fm10k_hw_stats *hw_stats =
1073 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1075 PMD_INIT_FUNC_TRACE();
1077 memset(hw_stats, 0, sizeof(*hw_stats));
1078 fm10k_rebind_hw_stats(hw, hw_stats);
1082 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1083 struct rte_eth_dev_info *dev_info)
1085 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1087 PMD_INIT_FUNC_TRACE();
1089 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
1090 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
1091 dev_info->max_rx_queues = hw->mac.max_queues;
1092 dev_info->max_tx_queues = hw->mac.max_queues;
1093 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
1094 dev_info->max_hash_mac_addrs = 0;
1095 dev_info->max_vfs = dev->pci_dev->max_vfs;
1096 dev_info->vmdq_pool_base = 0;
1097 dev_info->vmdq_queue_base = 0;
1098 dev_info->max_vmdq_pools = ETH_32_POOLS;
1099 dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
1100 dev_info->rx_offload_capa =
1101 DEV_RX_OFFLOAD_VLAN_STRIP |
1102 DEV_RX_OFFLOAD_IPV4_CKSUM |
1103 DEV_RX_OFFLOAD_UDP_CKSUM |
1104 DEV_RX_OFFLOAD_TCP_CKSUM;
1105 dev_info->tx_offload_capa =
1106 DEV_TX_OFFLOAD_VLAN_INSERT |
1107 DEV_TX_OFFLOAD_IPV4_CKSUM |
1108 DEV_TX_OFFLOAD_UDP_CKSUM |
1109 DEV_TX_OFFLOAD_TCP_CKSUM |
1110 DEV_TX_OFFLOAD_TCP_TSO;
1112 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1113 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1115 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1117 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1118 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1119 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1121 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1125 dev_info->default_txconf = (struct rte_eth_txconf) {
1127 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1128 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1129 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1131 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1132 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1133 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1134 ETH_TXQ_FLAGS_NOOFFLOADS,
1140 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1143 uint16_t mac_num = 0;
1144 uint32_t vid_idx, vid_bit, mac_index;
1145 struct fm10k_hw *hw;
1146 struct fm10k_macvlan_filter_info *macvlan;
1147 struct rte_eth_dev_data *data = dev->data;
1149 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1150 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1152 if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1153 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1157 if (vlan_id > ETH_VLAN_ID_MAX) {
1158 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1162 vid_idx = FM10K_VFTA_IDX(vlan_id);
1163 vid_bit = FM10K_VFTA_BIT(vlan_id);
1164 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1165 if (on && (macvlan->vfta[vid_idx] & vid_bit))
1167 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1168 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1169 PMD_INIT_LOG(ERR, "Invalid vlan_id: it does not exist "
1170 "in the VLAN filter table");
1175 result = fm10k_update_vlan(hw, vlan_id, 0, on);
1176 fm10k_mbx_unlock(hw);
1177 if (result != FM10K_SUCCESS) {
1178 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1182 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1183 (result == FM10K_SUCCESS); mac_index++) {
1184 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1186 if (mac_num > macvlan->mac_num - 1) {
1187 PMD_INIT_LOG(ERR, "MAC address number does not match");
1192 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1193 data->mac_addrs[mac_index].addr_bytes,
1195 fm10k_mbx_unlock(hw);
1198 if (result != FM10K_SUCCESS) {
1199 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1204 macvlan->vlan_num++;
1205 macvlan->vfta[vid_idx] |= vid_bit;
1207 macvlan->vlan_num--;
1208 macvlan->vfta[vid_idx] &= ~vid_bit;
1214 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1216 if (mask & ETH_VLAN_STRIP_MASK) {
1217 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1218 PMD_INIT_LOG(ERR, "VLAN stripping is "
1219 "always on in fm10k");
1222 if (mask & ETH_VLAN_EXTEND_MASK) {
1223 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1224 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1225 "supported in fm10k");
1228 if (mask & ETH_VLAN_FILTER_MASK) {
1229 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1230 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1234 /* Add/Remove a MAC address, and update filters to main VSI */
1235 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1236 const u8 *mac, bool add, uint32_t pool)
1238 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1239 struct fm10k_macvlan_filter_info *macvlan;
1242 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1244 if (pool != MAIN_VSI_POOL_NUMBER) {
1245 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1246 "mac to pool %u", pool);
1249 for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1250 if (!macvlan->vfta[j])
1252 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1253 if (!(macvlan->vfta[j] & (1 << k)))
1255 if (i + 1 > macvlan->vlan_num) {
1256 PMD_INIT_LOG(ERR, "VLAN number does not match");
1260 fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1261 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1262 fm10k_mbx_unlock(hw);
1268 /* Add/Remove a MAC address, and update filters to VMDQ */
1269 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1270 const u8 *mac, bool add, uint32_t pool)
1272 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1273 struct fm10k_macvlan_filter_info *macvlan;
1274 struct rte_eth_vmdq_rx_conf *vmdq_conf;
1277 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1278 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1280 if (pool > macvlan->nb_queue_pools) {
1281 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1283 pool, macvlan->nb_queue_pools);
1286 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1287 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1290 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1291 vmdq_conf->pool_map[i].vlan_id, add, 0);
1292 fm10k_mbx_unlock(hw);
1296 /* Add/Remove a MAC address, and update filters */
1297 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1298 const u8 *mac, bool add, uint32_t pool)
1300 struct fm10k_macvlan_filter_info *macvlan;
1302 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1304 if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1305 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1307 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1315 /* Add a MAC address, and update filters */
1317 fm10k_macaddr_add(struct rte_eth_dev *dev,
1318 struct ether_addr *mac_addr,
1322 struct fm10k_macvlan_filter_info *macvlan;
1324 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1325 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1326 macvlan->mac_vmdq_id[index] = pool;
1329 /* Remove a MAC address, and update filters */
1331 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1333 struct rte_eth_dev_data *data = dev->data;
1334 struct fm10k_macvlan_filter_info *macvlan;
1336 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1337 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1338 FALSE, macvlan->mac_vmdq_id[index]);
1339 macvlan->mac_vmdq_id[index] = 0;
1343 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1345 if ((request < min) || (request > max) || ((request % mult) != 0))
1346 return -1;
1348 return 0;
1349 }
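/*
 * Illustrative usage (hypothetical request value): check_nb_desc() returns
 * 0 when min <= request <= max and request is a multiple of mult, e.g.
 *
 *	if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
 *			FM10K_MULT_RX_DESC, 512) == 0)
 *		... 512 descriptors is an acceptable ring size ...
 *
 * which is how fm10k_rx_queue_setup() and fm10k_tx_queue_setup()
 * validate nb_desc below.
 */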
1352 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
1353 * the physical address is required. If the memzone is already created, then
1354 * this function returns a pointer to the existing memzone.
1356 static inline const struct rte_memzone *
1357 allocate_hw_ring(const char *driver_name, const char *ring_name,
1358 uint8_t port_id, uint16_t queue_id, int socket_id,
1359 uint32_t size, uint32_t align)
1361 char name[RTE_MEMZONE_NAMESIZE];
1362 const struct rte_memzone *mz;
1364 snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
1365 driver_name, ring_name, port_id, queue_id, socket_id);
1367 /* return the memzone if it already exists */
1368 mz = rte_memzone_lookup(name);
1372 #ifdef RTE_LIBRTE_XEN_DOM0
1373 return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
1376 return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
1381 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1383 if ((request < min) || (request > max) || ((div % request) != 0))
1384 return -1;
1386 return 0;
1387 }
1390 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1392 uint16_t rx_free_thresh;
1394 if (conf->rx_free_thresh == 0)
1395 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1397 rx_free_thresh = conf->rx_free_thresh;
1399 /* make sure the requested threshold satisfies the constraints */
1400 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1401 FM10K_RX_FREE_THRESH_MAX(q),
1402 FM10K_RX_FREE_THRESH_DIV(q),
1404 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1405 "less than or equal to %u, "
1406 "greater than or equal to %u, "
1407 "and a divisor of %u",
1408 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1409 FM10K_RX_FREE_THRESH_MIN(q),
1410 FM10K_RX_FREE_THRESH_DIV(q));
1414 q->alloc_thresh = rx_free_thresh;
1415 q->drop_en = conf->rx_drop_en;
1416 q->rx_deferred_start = conf->rx_deferred_start;
1422 * Hardware requires specific alignment for Rx packet buffers. At
1423 * least one of the following two conditions must be satisfied.
1424 * 1. Address is 512B aligned
1425 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1427 * As such, the driver may need to adjust the DMA address within the
1428 * buffer by up to 512B.
1430 * return 1 if the element size is valid, otherwise return 0.
1433 mempool_element_size_valid(struct rte_mempool *mp)
1437 /* elt_size includes mbuf header and headroom */
1438 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1439 RTE_PKTMBUF_HEADROOM;
1441 /* account for up to 512B of alignment */
1442 min_size -= FM10K_RX_DATABUF_ALIGN;
1444 /* sanity check for unsigned underflow (wraparound) */
1445 if (min_size > mp->elt_size)
1453 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1454 uint16_t nb_desc, unsigned int socket_id,
1455 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1457 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1458 struct fm10k_rx_queue *q;
1459 const struct rte_memzone *mz;
1461 PMD_INIT_FUNC_TRACE();
1463 /* make sure the mempool element size can account for alignment. */
1464 if (!mempool_element_size_valid(mp)) {
1465 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1469 /* make sure a valid number of descriptors have been requested */
1470 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1471 FM10K_MULT_RX_DESC, nb_desc)) {
1472 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1473 "less than or equal to %"PRIu32", "
1474 "greater than or equal to %u, "
1475 "and a multiple of %u",
1476 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1477 FM10K_MULT_RX_DESC);
1482 * if this queue existed already, free the associated memory. The
1483 * queue cannot be reused in case we need to allocate memory on a
1484 * different socket than was previously used.
1486 if (dev->data->rx_queues[queue_id] != NULL) {
1487 rx_queue_free(dev->data->rx_queues[queue_id]);
1488 dev->data->rx_queues[queue_id] = NULL;
1491 /* allocate memory for the queue structure */
1492 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1495 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1501 q->nb_desc = nb_desc;
1502 q->port_id = dev->data->port_id;
1503 q->queue_id = queue_id;
1504 q->tail_ptr = (volatile uint32_t *)
1505 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1506 if (handle_rxconf(q, conf))
1509 /* allocate memory for the software ring */
1510 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1511 nb_desc * sizeof(struct rte_mbuf *),
1512 RTE_CACHE_LINE_SIZE, socket_id);
1513 if (q->sw_ring == NULL) {
1514 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1520 * allocate memory for the hardware descriptor ring. A memzone large
1521 * enough to hold the maximum ring size is requested to allow for
1522 * resizing in later calls to the queue setup function.
1524 mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
1525 dev->data->port_id, queue_id, socket_id,
1526 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
1528 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1529 rte_free(q->sw_ring);
1533 q->hw_ring = mz->addr;
1534 #ifdef RTE_LIBRTE_XEN_DOM0
1535 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1537 q->hw_ring_phys_addr = mz->phys_addr;
1540 dev->data->rx_queues[queue_id] = q;
1545 fm10k_rx_queue_release(void *queue)
1547 PMD_INIT_FUNC_TRACE();
1549 rx_queue_free(queue);
1553 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1555 uint16_t tx_free_thresh;
1556 uint16_t tx_rs_thresh;
1558 /* constraint macros require that tx_free_thresh is configured
1559 * before tx_rs_thresh */
1560 if (conf->tx_free_thresh == 0)
1561 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1563 tx_free_thresh = conf->tx_free_thresh;
1565 /* make sure the requested threshold satisfies the constraints */
1566 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1567 FM10K_TX_FREE_THRESH_MAX(q),
1568 FM10K_TX_FREE_THRESH_DIV(q),
1570 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1571 "less than or equal to %u, "
1572 "greater than or equal to %u, "
1573 "and a divisor of %u",
1574 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1575 FM10K_TX_FREE_THRESH_MIN(q),
1576 FM10K_TX_FREE_THRESH_DIV(q));
1580 q->free_thresh = tx_free_thresh;
1582 if (conf->tx_rs_thresh == 0)
1583 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1585 tx_rs_thresh = conf->tx_rs_thresh;
1587 q->tx_deferred_start = conf->tx_deferred_start;
1589 /* make sure the requested threshold satisfies the constraints */
1590 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1591 FM10K_TX_RS_THRESH_MAX(q),
1592 FM10K_TX_RS_THRESH_DIV(q),
1594 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1595 "less than or equal to %u, "
1596 "greater than or equal to %u, "
1597 "and a divisor of %u",
1598 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1599 FM10K_TX_RS_THRESH_MIN(q),
1600 FM10K_TX_RS_THRESH_DIV(q));
1604 q->rs_thresh = tx_rs_thresh;
1610 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1611 uint16_t nb_desc, unsigned int socket_id,
1612 const struct rte_eth_txconf *conf)
1614 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1615 struct fm10k_tx_queue *q;
1616 const struct rte_memzone *mz;
1618 PMD_INIT_FUNC_TRACE();
1620 /* make sure a valid number of descriptors have been requested */
1621 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1622 FM10K_MULT_TX_DESC, nb_desc)) {
1623 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1624 "less than or equal to %"PRIu32", "
1625 "greater than or equal to %u, "
1626 "and a multiple of %u",
1627 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1628 FM10K_MULT_TX_DESC);
1633 * if this queue existed already, free the associated memory. The
1634 * queue cannot be reused in case we need to allocate memory on a
1635 * different socket than was previously used.
1637 if (dev->data->tx_queues[queue_id] != NULL) {
1638 tx_queue_free(dev->data->tx_queues[queue_id]);
1639 dev->data->tx_queues[queue_id] = NULL;
1642 /* allocate memory for the queue structure */
1643 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1646 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1651 q->nb_desc = nb_desc;
1652 q->port_id = dev->data->port_id;
1653 q->queue_id = queue_id;
1654 q->tail_ptr = (volatile uint32_t *)
1655 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1656 if (handle_txconf(q, conf))
1659 /* allocate memory for the software ring */
1660 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1661 nb_desc * sizeof(struct rte_mbuf *),
1662 RTE_CACHE_LINE_SIZE, socket_id);
1663 if (q->sw_ring == NULL) {
1664 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1670 * allocate memory for the hardware descriptor ring. A memzone large
1671 * enough to hold the maximum ring size is requested to allow for
1672 * resizing in later calls to the queue setup function.
1674 mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1675 dev->data->port_id, queue_id, socket_id,
1676 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
1678 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1679 rte_free(q->sw_ring);
1683 q->hw_ring = mz->addr;
1684 #ifdef RTE_LIBRTE_XEN_DOM0
1685 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1687 q->hw_ring_phys_addr = mz->phys_addr;
1691 * allocate memory for the RS bit tracker. Enough slots to hold the
1692 * descriptor index for each RS bit needing to be set are required.
1694 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1695 ((nb_desc + 1) / q->rs_thresh) *
1697 RTE_CACHE_LINE_SIZE, socket_id);
1698 if (q->rs_tracker.list == NULL) {
1699 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1700 rte_free(q->sw_ring);
1705 dev->data->tx_queues[queue_id] = q;
1710 fm10k_tx_queue_release(void *queue)
1712 PMD_INIT_FUNC_TRACE();
1714 tx_queue_free(queue);
1718 fm10k_reta_update(struct rte_eth_dev *dev,
1719 struct rte_eth_rss_reta_entry64 *reta_conf,
1722 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1723 uint16_t i, j, idx, shift;
1727 PMD_INIT_FUNC_TRACE();
1729 if (reta_size > FM10K_MAX_RSS_INDICES) {
1730 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1731 "(%d) doesn't match the number the hardware can support "
1732 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1737 * Update Redirection Table RETA[n], n=0..31. The redirection table has
1738 * 128 entries in 32 registers.
1740 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1741 idx = i / RTE_RETA_GROUP_SIZE;
1742 shift = i % RTE_RETA_GROUP_SIZE;
1743 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1744 BIT_MASK_PER_UINT32);
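/*
 * Descriptive note: each rte_eth_rss_reta_entry64 covers
 * RTE_RETA_GROUP_SIZE (64) table entries, while one FM10K_RETA register
 * packs 4 byte-wide queue indices. The 4-bit mask computed above selects
 * which of the 4 entries in this iteration should change; anything short
 * of a full mask forces the read-modify-write below.
 */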
1749 if (mask != BIT_MASK_PER_UINT32)
1750 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1752 for (j = 0; j < CHARS_PER_UINT32; j++) {
1753 if (mask & (0x1 << j)) {
1755 reta &= ~(UINT8_MAX << CHAR_BIT * j);
1756 reta |= reta_conf[idx].reta[shift + j] <<
1760 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
1767 fm10k_reta_query(struct rte_eth_dev *dev,
1768 struct rte_eth_rss_reta_entry64 *reta_conf,
1771 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1772 uint16_t i, j, idx, shift;
1776 PMD_INIT_FUNC_TRACE();
1778 if (reta_size < FM10K_MAX_RSS_INDICES) {
1779 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1780 "(%d) doesn't match the number the hardware can support "
1781 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1786 * Read Redirection Table RETA[n], n=0..31. The redirection table has
1787 * 128 entries in 32 registers.
1789 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1790 idx = i / RTE_RETA_GROUP_SIZE;
1791 shift = i % RTE_RETA_GROUP_SIZE;
1792 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1793 BIT_MASK_PER_UINT32);
1797 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1798 for (j = 0; j < CHARS_PER_UINT32; j++) {
1799 if (mask & (0x1 << j))
1800 reta_conf[idx].reta[shift + j] = ((reta >>
1801 CHAR_BIT * j) & UINT8_MAX);
1809 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1810 struct rte_eth_rss_conf *rss_conf)
1812 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1813 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1815 uint64_t hf = rss_conf->rss_hf;
1818 PMD_INIT_FUNC_TRACE();
1820 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1821 FM10K_RSSRK_ENTRIES_PER_REG)
1828 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
1829 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
1830 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
1831 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
1832 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
1833 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
1834 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
1835 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
1836 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
1838 /* If the mapping doesn't fit any supported hash function, return */
1843 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1844 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1846 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
1852 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1853 struct rte_eth_rss_conf *rss_conf)
1855 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1856 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1861 PMD_INIT_FUNC_TRACE();
1863 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1864 FM10K_RSSRK_ENTRIES_PER_REG)
1868 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1869 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1871 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
1873 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
1874 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
1875 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
1876 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
1877 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
1878 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
1879 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
1880 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
1881 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
1883 rss_conf->rss_hf = hf;
1889 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
1891 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1892 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1894 /* Bind all local non-queue interrupts to vector 0 */
1897 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1898 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1899 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1900 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1901 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1902 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1904 /* Enable misc causes */
1905 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
1906 FM10K_EIMR_ENABLE(THI_FAULT) |
1907 FM10K_EIMR_ENABLE(FUM_FAULT) |
1908 FM10K_EIMR_ENABLE(MAILBOX) |
1909 FM10K_EIMR_ENABLE(SWITCHREADY) |
1910 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
1911 FM10K_EIMR_ENABLE(SRAMERROR) |
1912 FM10K_EIMR_ENABLE(VFLR));
1915 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1916 FM10K_ITR_MASK_CLEAR);
1917 FM10K_WRITE_FLUSH(hw);
1921 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
1923 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1924 uint32_t int_map = FM10K_INT_MAP_DISABLE;
1928 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1929 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1930 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1931 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1932 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1933 FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1935 /* Disable misc causes */
1936 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
1937 FM10K_EIMR_DISABLE(THI_FAULT) |
1938 FM10K_EIMR_DISABLE(FUM_FAULT) |
1939 FM10K_EIMR_DISABLE(MAILBOX) |
1940 FM10K_EIMR_DISABLE(SWITCHREADY) |
1941 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
1942 FM10K_EIMR_DISABLE(SRAMERROR) |
1943 FM10K_EIMR_DISABLE(VFLR));
1946 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
1947 FM10K_WRITE_FLUSH(hw);
1951 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
1953 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1954 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1956 /* Bind all local non-queue interrupts to vector 0 */
1959 /* Only INT 0 is available; the other 15 are reserved. */
1960 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
1963 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1964 FM10K_ITR_MASK_CLEAR);
1965 FM10K_WRITE_FLUSH(hw);
1969 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
1971 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1972 uint32_t int_map = FM10K_INT_MAP_DISABLE;
1976 /* Only INT 0 is available; the other 15 are reserved. */
1977 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
1980 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
1981 FM10K_WRITE_FLUSH(hw);
1985 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
1987 struct fm10k_fault fault;
1989 const char *estr = "Unknown error";
1991 /* Process PCA fault */
1992 if (eicr & FM10K_EICR_PCA_FAULT) {
1993 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
1996 switch (fault.type) {
1998 estr = "PCA_NO_FAULT"; break;
1999 case PCA_UNMAPPED_ADDR:
2000 estr = "PCA_UNMAPPED_ADDR"; break;
2001 case PCA_BAD_QACCESS_PF:
2002 estr = "PCA_BAD_QACCESS_PF"; break;
2003 case PCA_BAD_QACCESS_VF:
2004 estr = "PCA_BAD_QACCESS_VF"; break;
2005 case PCA_MALICIOUS_REQ:
2006 estr = "PCA_MALICIOUS_REQ"; break;
2007 case PCA_POISONED_TLP:
2008 estr = "PCA_POISONED_TLP"; break;
2010 estr = "PCA_TLP_ABORT"; break;
2014 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2015 estr, fault.func ? "VF" : "PF", fault.func,
2016 fault.address, fault.specinfo);
2019 /* Process THI fault */
2020 if (eicr & FM10K_EICR_THI_FAULT) {
2021 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2024 switch (fault.type) {
2026 estr = "THI_NO_FAULT"; break;
2027 case THI_MAL_DIS_Q_FAULT:
2028 estr = "THI_MAL_DIS_Q_FAULT"; break;
2032 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2033 estr, fault.func ? "VF" : "PF", fault.func,
2034 fault.address, fault.specinfo);
2037 /* Process FUM fault */
2038 if (eicr & FM10K_EICR_FUM_FAULT) {
2039 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2042 switch (fault.type) {
2044 estr = "FUM_NO_FAULT"; break;
2045 case FUM_UNMAPPED_ADDR:
2046 estr = "FUM_UNMAPPED_ADDR"; break;
2047 case FUM_POISONED_TLP:
2048 estr = "FUM_POISONED_TLP"; break;
2049 case FUM_BAD_VF_QACCESS:
2050 estr = "FUM_BAD_VF_QACCESS"; break;
2051 case FUM_ADD_DECODE_ERR:
2052 estr = "FUM_ADD_DECODE_ERR"; break;
2054 estr = "FUM_RO_ERROR"; break;
2055 case FUM_QPRC_CRC_ERROR:
2056 estr = "FUM_QPRC_CRC_ERROR"; break;
2057 case FUM_CSR_TIMEOUT:
2058 estr = "FUM_CSR_TIMEOUT"; break;
2059 case FUM_INVALID_TYPE:
2060 estr = "FUM_INVALID_TYPE"; break;
2061 case FUM_INVALID_LENGTH:
2062 estr = "FUM_INVALID_LENGTH"; break;
2063 case FUM_INVALID_BE:
2064 estr = "FUM_INVALID_BE"; break;
2065 case FUM_INVALID_ALIGN:
2066 estr = "FUM_INVALID_ALIGN"; break;
2070 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2071 estr, fault.func ? "VF" : "PF", fault.func,
2072 fault.address, fault.specinfo);
2077 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2082 * PF interrupt handler triggered by the NIC for handling a specific interrupt.
2085 * Pointer to interrupt handle.
2087 * The address of the parameter (struct rte_eth_dev *) registered before.
2093 fm10k_dev_interrupt_handler_pf(
2094 __rte_unused struct rte_intr_handle *handle,
2097 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2098 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2099 uint32_t cause, status;
2101 if (hw->mac.type != fm10k_mac_pf)
2104 cause = FM10K_READ_REG(hw, FM10K_EICR);
2106 /* Handle PCI fault cases */
2107 if (cause & FM10K_EICR_FAULT_MASK) {
2108 PMD_INIT_LOG(ERR, "INT: fault detected!");
2109 fm10k_dev_handle_fault(hw, cause);
2112 /* Handle switch up/down */
2113 if (cause & FM10K_EICR_SWITCHNOTREADY)
2114 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2116 if (cause & FM10K_EICR_SWITCHREADY)
2117 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2119 /* Handle mailbox message */
2120 fm10k_mbx_lock(hw);
2121 hw->mbx.ops.process(hw, &hw->mbx);
2122 fm10k_mbx_unlock(hw);
2124 /* Handle SRAM error */
2125 if (cause & FM10K_EICR_SRAMERROR) {
2126 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2128 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2129 /* Write to clear pending bits */
2130 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2132 /* Todo: print out error message after shared code updates */
2135 /* Clear these 3 events, if pending */
2136 cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2137 FM10K_EICR_SWITCHREADY;
2139 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2141 /* Re-enable interrupt from device side */
2142 FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2143 FM10K_ITR_MASK_CLEAR);
2144 /* Re-enable interrupt from host side */
2145 rte_intr_enable(&(dev->pci_dev->intr_handle));
2149 * VF interrupt handler triggered by the NIC for handling a specific interrupt.
2152 * Pointer to interrupt handle.
2154 * The address of the parameter (struct rte_eth_dev *) registered before.
2160 fm10k_dev_interrupt_handler_vf(
2161 __rte_unused struct rte_intr_handle *handle,
2164 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2165 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2167 if (hw->mac.type != fm10k_mac_vf)
2170 /* Handle mailbox message once the lock is acquired */
2171 fm10k_mbx_lock(hw);
2172 hw->mbx.ops.process(hw, &hw->mbx);
2173 fm10k_mbx_unlock(hw);
2175 /* Re-enable interrupt from device side */
2176 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2177 FM10K_ITR_MASK_CLEAR);
2178 /* Re-enable interrupt from host side */
2179 rte_intr_enable(&(dev->pci_dev->intr_handle));
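
/*
 * Unlike the PF handler above, the VF handler only services the mailbox;
 * fault and switch-state events are handled on the PF side. The tables
 * below bind mailbox message IDs to handler callbacks and are installed
 * via mbx.ops.register_handlers() in fm10k_setup_mbx_service().
 */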
/* Mailbox message handler in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
/* Mailbox message handler in PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace default message handlers with new ones */
	if (hw->mac.type == fm10k_mac_pf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
	else
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
				err);
		return err;
	}

	/* Connect to SM for PF device or PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}
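
/*
 * Ordering note: fm10k_setup_mbx_service() registers the handler tables
 * before issuing connect(), so messages arriving once the session comes up
 * are dispatched through the tables above rather than the shared code's
 * defaults.
 */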
static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
	/* Disconnect from SM for PF device or PF for VF device */
	hw->mbx.ops.disconnect(hw, &hw->mbx);
}
static const struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure = fm10k_dev_configure,
	.dev_start = fm10k_dev_start,
	.dev_stop = fm10k_dev_stop,
	.dev_close = fm10k_dev_close,
	.promiscuous_enable = fm10k_dev_promiscuous_enable,
	.promiscuous_disable = fm10k_dev_promiscuous_disable,
	.allmulticast_enable = fm10k_dev_allmulticast_enable,
	.allmulticast_disable = fm10k_dev_allmulticast_disable,
	.stats_get = fm10k_stats_get,
	.stats_reset = fm10k_stats_reset,
	.link_update = fm10k_link_update,
	.dev_infos_get = fm10k_dev_infos_get,
	.vlan_filter_set = fm10k_vlan_filter_set,
	.vlan_offload_set = fm10k_vlan_offload_set,
	.mac_addr_add = fm10k_macaddr_add,
	.mac_addr_remove = fm10k_macaddr_remove,
	.rx_queue_start = fm10k_dev_rx_queue_start,
	.rx_queue_stop = fm10k_dev_rx_queue_stop,
	.tx_queue_start = fm10k_dev_tx_queue_start,
	.tx_queue_stop = fm10k_dev_tx_queue_stop,
	.rx_queue_setup = fm10k_rx_queue_setup,
	.rx_queue_release = fm10k_rx_queue_release,
	.tx_queue_setup = fm10k_tx_queue_setup,
	.tx_queue_release = fm10k_tx_queue_release,
	.reta_update = fm10k_reta_update,
	.reta_query = fm10k_reta_query,
	.rss_hash_update = fm10k_rss_hash_update,
	.rss_hash_conf_get = fm10k_rss_hash_conf_get,
};
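
/*
 * The ops table above is what the generic ethdev layer dispatches through:
 * an application call such as rte_eth_dev_start(port_id) ends up in
 * fm10k_dev_start() via this table.
 */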
static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag;
	struct fm10k_macvlan_filter_info *macvlan;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;

	if (dev->data->scattered_rx)
		dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;

	/* only initialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	memset(macvlan, 0, sizeof(*macvlan));

	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = dev->pci_dev->id.device_id;
	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}
	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/*
	 * Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4. Until
	 * this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;
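
	/*
	 * Note: bus_caps is filled with the device's maximum capability
	 * (512B payload) while bus holds the assumed current configuration
	 * (256B payload); both are placeholders until fm10k_get_bus_info()
	 * can be used, per the todo above.
	 */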
	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k",
			ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	if (diag != FM10K_SUCCESS ||
		!is_valid_assigned_ether_addr(dev->data->mac_addrs)) {

		/* Generate a random addr */
		eth_random_addr(hw->mac.addr);
		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		ether_addr_copy((const struct ether_addr *)hw->mac.addr,
				&dev->data->mac_addrs[0]);
	}
	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return -EIO;
	}

	/* PF/VF has different interrupt handling mechanism */
	if (hw->mac.type == fm10k_mac_pf) {
		/* register callback func to eal lib */
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_pf, (void *)dev);

		/* enable MISC interrupt */
		fm10k_dev_enable_intr_pf(dev);
	} else {
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);

		fm10k_dev_enable_intr_vf(dev);
	}

	/* Enable uio intr after callback registered */
	rte_intr_enable(&(dev->pci_dev->intr_handle));

	hw->mac.ops.update_int_moderator(hw);

	/* Make sure Switch Manager is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		int switch_ready = 0;
		int i;

		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			fm10k_mbx_lock(hw);
			hw->mac.ops.get_host_state(hw, &switch_ready);
			fm10k_mbx_unlock(hw);
			if (switch_ready)
				break;
			/* Delay some time to acquire async LPORT_MAP info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (switch_ready == 0) {
			PMD_INIT_LOG(ERR, "switch is not ready");
			return -1;
		}
	}
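
	/*
	 * With MAX_QUERY_SWITCH_STATE_TIMES polls spaced WAIT_SWITCH_MSG_US
	 * apart, the loop above waits up to 10 * 100 ms = 1 s for the
	 * switch manager before giving up.
	 */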
	/*
	 * The calls below trigger operations on the mailbox, so acquire the
	 * lock to avoid a race condition with the interrupt handler.
	 * Operations on the mailbox FIFO raise an interrupt to the PF/SM,
	 * whose handler responds with an interrupt back to our side, at
	 * which point the mailbox FIFO is touched again.
	 */
	fm10k_mbx_lock(hw);

	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);

	/* Set unicast mode by default. The application can change to another
	 * mode through other API functions.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
					FM10K_XCAST_MODE_NONE);

	fm10k_mbx_unlock(hw);

	/* Add default MAC address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);

	return 0;
}
static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	fm10k_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* disable uio/vfio intr */
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* PF/VF has different interrupt handling mechanism */
	if (hw->mac.type == fm10k_mac_pf) {
		/* disable interrupt */
		fm10k_dev_disable_intr_pf(dev);

		/* unregister callback func from eal lib */
		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_pf, (void *)dev);
	} else {
		/* disable interrupt */
		fm10k_dev_disable_intr_vf(dev);

		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);
	}

	/* free MAC address memory */
	if (dev->data->mac_addrs) {
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
	}

	memset(hw, 0, sizeof(*hw));

	return 0;
}
/*
 * The set of PCI devices this driver supports. This driver will enable both
 * PF and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
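
/*
 * The two #defines above follow the X-macro convention of
 * rte_pci_dev_ids.h: including that header expands
 * RTE_PCI_DEV_ID_DECL_FM10K{,VF} once per known fm10k PF/VF device ID, so
 * the table tracks the canonical ID list without duplicating it here.
 */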
static struct eth_driver rte_pmd_fm10k = {
	.pci_drv = {
		.name = "rte_pmd_fm10k",
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.eth_dev_uninit = eth_fm10k_dev_uninit,
	.dev_private_size = sizeof(struct fm10k_adapter),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the Poll Mode Driver for PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_pmd_fm10k);
	return 0;
}
static struct rte_driver rte_fm10k_driver = {
	.type = PMD_PDEV,
	.init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);
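
/*
 * PMD_REGISTER_DRIVER() emits a constructor that adds rte_fm10k_driver to
 * the EAL's driver list at load time; rte_pmd_fm10k_init() then runs from
 * rte_eal_init(), registering the PMD so device probe can match the PCI
 * IDs in pci_id_fm10k_map.
 */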