/*
 * Copyright (c) 2014, 2015 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_net.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_version.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_spinlock.h>

#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
#include "nfp_net_ctrl.h"
/* Prototypes */
static void nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_configure(struct rte_eth_dev *dev);
static void nfp_net_dev_interrupt_handler(void *param);
static void nfp_net_dev_interrupt_delayed_handler(void *param);
static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void nfp_net_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int nfp_net_init(struct rte_eth_dev *eth_dev);
static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
				       uint16_t queue_idx);
static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
static void nfp_net_rx_queue_release(void *rxq);
static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				  uint16_t nb_desc, unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *mp);
static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
static void nfp_net_tx_queue_release(void *txq);
static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				  uint16_t nb_desc, unsigned int socket_id,
				  const struct rte_eth_txconf *tx_conf);
static int nfp_net_start(struct rte_eth_dev *dev);
static void nfp_net_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats);
static void nfp_net_stats_reset(struct rte_eth_dev *dev);
static void nfp_net_stop(struct rte_eth_dev *dev);
static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
/*
 * The offset of the queue controller queues in the PCIe Target. These
 * happen to be at the same offset on the NFP6000 and the NFP3200 so
 * we use a single macro here.
 */
#define NFP_PCIE_QUEUE(_q)	(0x80000 + (0x800 * ((_q) & 0xff)))
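/*
 * For example, NFP_PCIE_QUEUE(0) resolves to 0x80000 and NFP_PCIE_QUEUE(1)
 * to 0x80800: each queue controller gets a 0x800-byte window, and the
 * ((_q) & 0xff) mask keeps the index within the 256-queue range.
 */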
/* Maximum value which can be added to a queue with one transaction */
#define NFP_QCP_MAX_ADD	0x7f

#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
	(uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};
/*
 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
 * @q: Base address for queue structure
 * @ptr: Add to the Read or Write pointer
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void
nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
{
	uint32_t off;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_ADD_RPTR;
	else
		off = NFP_QCP_QUEUE_ADD_WPTR;

	while (val > NFP_QCP_MAX_ADD) {
		nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
		val -= NFP_QCP_MAX_ADD;
	}

	nn_writel(rte_cpu_to_le_32(val), q + off);
}
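/*
 * Illustrative example: a single add is capped at NFP_QCP_MAX_ADD (0x7f),
 * so nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, 300) issues three doorbell
 * writes, 127 + 127 + 46, before returning.
 */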
/*
 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
 * @q: Base address for queue structure
 * @ptr: Read or Write pointer
 */
static inline uint32_t
nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
{
	uint32_t off;
	uint32_t val;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_STS_LO;
	else
		off = NFP_QCP_QUEUE_STS_HI;

	val = rte_cpu_to_le_32(nn_readl(q + off));

	if (ptr == NFP_QCP_READ_PTR)
		return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
	else
		return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}
/*
 * Functions to read/write from/to Config BAR
 * Performs any endian conversion necessary.
 */
static inline uint8_t
nn_cfg_readb(struct nfp_net_hw *hw, int off)
{
	return nn_readb(hw->ctrl_bar + off);
}

static inline void
nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
{
	nn_writeb(val, hw->ctrl_bar + off);
}

static inline uint32_t
nn_cfg_readl(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
{
	nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
}

static inline uint64_t
nn_cfg_readq(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
{
	nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
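/*
 * Usage sketch (illustrative): the config BAR is little-endian, so these
 * helpers hide byte swapping from callers on big-endian hosts, e.g.:
 *
 *	nn_cfg_writel(hw, NFP_NET_CFG_MTU, 1500);
 *
 * stores 1500 as a little-endian 32-bit word, and a following
 * nn_cfg_readl(hw, NFP_NET_CFG_MTU) returns it in CPU byte order again.
 */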
/*
 * Atomically reads link status information from global structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/*
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
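/*
 * Both helpers depend on struct rte_eth_link fitting in 64 bits so that
 * rte_atomic64_cmpset() can copy it in a single compare-and-swap. Sketch
 * of a typical caller (illustrative only):
 *
 *	struct rte_eth_link snapshot;
 *	if (nfp_net_dev_atomic_read_link_status(dev, &snapshot) == 0 &&
 *	    snapshot.link_status == ETH_LINK_UP)
 *		;	// link is up, act on the consistent snapshot
 */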
static void
nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
{
	unsigned i;

	if (rxq->rxbufs == NULL)
		return;

	for (i = 0; i < rxq->rx_count; i++) {
		if (rxq->rxbufs[i].mbuf) {
			rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
			rxq->rxbufs[i].mbuf = NULL;
		}
	}
}
static void
nfp_net_rx_queue_release(void *rx_queue)
{
	struct nfp_net_rxq *rxq = rx_queue;

	if (rxq) {
		nfp_net_rx_queue_release_mbufs(rxq);
		rte_free(rxq->rxbufs);
		rte_free(rxq);
	}
}

static void
nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
{
	nfp_net_rx_queue_release_mbufs(rxq);
	rxq->rd_p = 0;
	rxq->nb_rx_hold = 0;
}
static void
nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
{
	unsigned i;

	if (txq->txbufs == NULL)
		return;

	for (i = 0; i < txq->tx_count; i++) {
		if (txq->txbufs[i].mbuf) {
			rte_pktmbuf_free(txq->txbufs[i].mbuf);
			txq->txbufs[i].mbuf = NULL;
		}
	}
}

static void
nfp_net_tx_queue_release(void *tx_queue)
{
	struct nfp_net_txq *txq = tx_queue;

	if (txq) {
		nfp_net_tx_queue_release_mbufs(txq);
		rte_free(txq->txbufs);
		rte_free(txq);
	}
}

static void
nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
{
	nfp_net_tx_queue_release_mbufs(txq);
	txq->wr_p = 0;
	txq->rd_p = 0;
}
static int
__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
{
	int cnt;
	uint32_t new;
	struct timespec wait;

	PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
		    hw->qcp_cfg);

	if (hw->qcp_cfg == NULL)
		rte_panic("Bad configuration queue pointer\n");

	nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);

	wait.tv_sec = 0;
	wait.tv_nsec = 1000000;

	PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");

	/* Poll update field, waiting for NFP to ack the config */
	for (cnt = 0; ; cnt++) {
		new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
		if (new == 0)
			break;
		if (new & NFP_NET_CFG_UPDATE_ERR) {
			PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
			break;
		}
		if (cnt >= NFP_NET_POLL_TIMEOUT) {
			PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
					  " %dms", update, cnt);
			rte_panic("Exiting\n");
		}
		nanosleep(&wait, 0); /* wait 1ms before polling again */
	}

	PMD_DRV_LOG(DEBUG, "Ack DONE\n");
	return 0;
}
/*
 * Reconfigure the NIC
 * @hw: device to reconfigure
 * @ctrl: The value for the ctrl field in the BAR config
 * @update: The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue. Then poll
 * until the firmware has acknowledged the update by zeroing the update word.
 */
static int
nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
{
	uint32_t err;

	PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
		    ctrl, update);

	rte_spinlock_lock(&hw->reconfig_lock);

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);

	rte_wmb();

	err = __nfp_net_reconfig(hw, update);

	rte_spinlock_unlock(&hw->reconfig_lock);

	if (!err)
		return 0;

	/*
	 * A reconfig error reaching this point is one the caller can
	 * handle; anything fatal triggers rte_panic inside
	 * __nfp_net_reconfig instead.
	 */
	PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
		     ctrl, update);
	return -EIO;
}
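/*
 * Typical reconfig sequence (illustrative), as used by the promiscuous
 * mode handlers below: set the new ctrl bits, request a general update,
 * and commit the host copy only once the firmware has acked:
 *
 *	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 *	if (nfp_net_reconfig(hw, new_ctrl, NFP_NET_CFG_UPDATE_GEN) == 0)
 *		hw->ctrl = new_ctrl;
 */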
/*
 * Configure an Ethernet device. This function must be invoked first
 * before any other function in the Ethernet API. This function can
 * also be re-invoked when a device is in the stopped state.
 */
static int
nfp_net_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	uint32_t new_ctrl = 0;
	uint32_t update = 0;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * A DPDK app sends info about how many queues to use and how
	 * those queues need to be configured. This is used by the
	 * DPDK core and it makes sure no more queues than those
	 * advertised by the driver are requested. This function is
	 * called after that internal process.
	 */

	PMD_INIT_LOG(DEBUG, "Configure");

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;
	txmode = &dev_conf->txmode;

	/* Checking TX mode */
	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
		return -EINVAL;
	}

	/* Checking RX mode */
	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
		if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
			update = NFP_NET_CFG_UPDATE_RSS;
			new_ctrl = NFP_NET_CFG_CTRL_RSS;
		} else {
			PMD_INIT_LOG(INFO, "RSS not supported");
			return -EINVAL;
		}
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "rxmode does not support split header");
		return -EINVAL;
	}

	if (rxmode->hw_ip_checksum) {
		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
			new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
		} else {
			PMD_INIT_LOG(INFO, "RXCSUM not supported");
			return -EINVAL;
		}
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_strip) {
		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
		} else {
			PMD_INIT_LOG(INFO, "hw vlan strip not supported");
			return -EINVAL;
		}
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported");
		return -EINVAL;
	}

	/* Supporting VLAN insertion by default */
	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
		new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;

	/*
	 * rxmode->jumbo_frame needs no action here: it is handled in
	 * rte_eth_dev_configure.
	 */

	if (rxmode->hw_strip_crc) {
		PMD_INIT_LOG(INFO, "strip CRC not supported");
		return -EINVAL;
	}

	if (rxmode->enable_scatter) {
		PMD_INIT_LOG(INFO, "Scatter not supported");
		return -EINVAL;
	}

	if (!new_ctrl)
		return 0;

	update |= NFP_NET_CFG_UPDATE_GEN;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	return 0;
}
static void
nfp_net_enable_queues(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	uint64_t enabled_queues = 0;
	int i;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Enabling the required TX queues in the device */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		enabled_queues |= (1 << i);

	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);

	enabled_queues = 0;

	/* Enabling the required RX queues in the device */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		enabled_queues |= (1 << i);

	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
}
static void
nfp_net_disable_queues(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	uint32_t new_ctrl, update = 0;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);

	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
		 NFP_NET_CFG_UPDATE_MSIX;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	/* If reconfig fails, leave the hw state unchanged */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}
static int
nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
{
	int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
			return -1;
	}
	return 0;
}

static void
nfp_net_params_setup(struct nfp_net_hw *hw)
{
	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
}

static void
nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
{
	hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
}
static void nfp_net_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
	/* Copy only the 4 bytes held by tmp; copying sizeof(struct
	 * ether_addr) here would read past the end of tmp. */
	memcpy(&hw->mac_addr[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr[4], &tmp, 2);
}
static int
nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct nfp_net_hw *hw;
	int i;

	if (!intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
		PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
		/* UIO just supports one queue and no LSC */
		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
	} else {
		PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			/*
			 * The first msix vector is reserved for non
			 * efd interrupts
			 */
			nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
	}

	/* Avoiding TX interrupts */
	hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
	return 0;
}
static int
nfp_net_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler
			 */
			rte_intr_callback_unregister(&pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					     "supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle))
		nfp_configure_rx_interrupt(dev, intr_handle);

	rte_intr_enable(intr_handle);

	/* Enable device */
	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Just configuring queues interrupts when necessary */
	if (rte_intr_dp_is_en(intr_handle))
		update |= NFP_NET_CFG_UPDATE_MSIX;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app is
	 * exiting and the system then releases all the memory
	 * allocated, even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit, but it is better if we tell
	 * the device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
nfp_net_stop(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_LOG(DEBUG, "Stop");

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		nfp_net_reset_tx_queue(
			(struct nfp_net_txq *)dev->data->tx_queues[i]);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		nfp_net_reset_rx_queue(
			(struct nfp_net_rxq *)dev->data->rx_queues[i]);
	}
}
/* Reset and stop device. The device can not be restarted. */
static void
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;

	PMD_INIT_LOG(DEBUG, "Close");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	rte_intr_disable(&pci_dev->intr_handle);
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     nfp_net_dev_interrupt_handler,
				     (void *)dev);

	/*
	 * The ixgbe PMD driver disables the pcie master on the
	 * device. The i40e does not...
	 */
}
static void
nfp_net_promisc_enable(struct rte_eth_dev *dev)
{
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
		PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
		return;
	}

	if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
		return;
	}

	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
	update = NFP_NET_CFG_UPDATE_GEN;

	/*
	 * DPDK sets promiscuous mode on just after this call assuming
	 * it can not fail ...
	 */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}
static void
nfp_net_promisc_disable(struct rte_eth_dev *dev)
{
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
		return;
	}

	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
	update = NFP_NET_CFG_UPDATE_GEN;

	/*
	 * DPDK sets promiscuous mode off just before this call
	 * assuming it can not fail ...
	 */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}
/*
 * return 0 means link status changed, -1 means not changed
 *
 * Wait to complete is needed as it can take up to 9 seconds to get the Link
 * status.
 */
static int
nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct nfp_net_hw *hw;
	struct rte_eth_link link, old;
	uint32_t nn_link_status;

	static const uint32_t ls_to_ethtool[] = {
		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
	};

	PMD_DRV_LOG(DEBUG, "Link update\n");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(&old, 0, sizeof(old));
	nfp_net_dev_atomic_read_link_status(dev, &old);

	nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);

	memset(&link, 0, sizeof(struct rte_eth_link));

	if (nn_link_status & NFP_NET_CFG_STS_LINK)
		link.link_status = ETH_LINK_UP;

	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
			 NFP_NET_CFG_STS_LINK_RATE_MASK;

	/* The second clause must test major == 4 and minor == 0 */
	if ((NFD_CFG_MAJOR_VERSION_of(hw->ver) < 4) ||
	    ((NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4) &&
	     (NFD_CFG_MINOR_VERSION_of(hw->ver) == 0))) {
		/* We really do not know the speed with old firmware */
		link.link_speed = ETH_SPEED_NUM_NONE;
	} else {
		if (nn_link_status >= RTE_DIM(ls_to_ethtool))
			link.link_speed = ETH_SPEED_NUM_NONE;
		else
			link.link_speed = ls_to_ethtool[nn_link_status];
	}

	if (old.link_status != link.link_status) {
		nfp_net_dev_atomic_write_link_status(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO, "NIC Link is Up\n");
		else
			PMD_DRV_LOG(INFO, "NIC Link is Down\n");
		return 0;
	}

	return -1;
}
static void
nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	int i;
	struct nfp_net_hw *hw;
	struct rte_eth_stats nfp_dev_stats;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */

	/* reading per RX ring stats */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nfp_dev_stats.q_ipackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));

		nfp_dev_stats.q_ipackets[i] -=
			hw->eth_stats_base.q_ipackets[i];

		nfp_dev_stats.q_ibytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);

		nfp_dev_stats.q_ibytes[i] -=
			hw->eth_stats_base.q_ibytes[i];
	}

	/* reading per TX ring stats */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nfp_dev_stats.q_opackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));

		nfp_dev_stats.q_opackets[i] -=
			hw->eth_stats_base.q_opackets[i];

		nfp_dev_stats.q_obytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);

		nfp_dev_stats.q_obytes[i] -=
			hw->eth_stats_base.q_obytes[i];
	}

	nfp_dev_stats.ipackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);

	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;

	nfp_dev_stats.ibytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);

	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;

	nfp_dev_stats.opackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);

	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;

	nfp_dev_stats.obytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);

	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;

	/* reading general device stats */
	nfp_dev_stats.ierrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);

	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;

	nfp_dev_stats.oerrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);

	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;

	/* RX ring mbuf allocation failures */
	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;

	nfp_dev_stats.imissed =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);

	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;

	if (stats)
		memcpy(stats, &nfp_dev_stats, sizeof(*stats));
}
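/*
 * Note: all counters above are reported relative to hw->eth_stats_base,
 * the snapshot taken by nfp_net_stats_reset() below. Resetting stats is
 * therefore just re-reading the hardware counters into that baseline.
 */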
static void
nfp_net_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * hw->eth_stats_base records the per-counter starting point.
	 * Let's update it now.
	 */

	/* reading per RX ring stats */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		hw->eth_stats_base.q_ipackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));

		hw->eth_stats_base.q_ibytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
	}

	/* reading per TX ring stats */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		hw->eth_stats_base.q_opackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));

		hw->eth_stats_base.q_obytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
	}

	hw->eth_stats_base.ipackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);

	hw->eth_stats_base.ibytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);

	hw->eth_stats_base.opackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);

	hw->eth_stats_base.obytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);

	/* reading general device stats */
	hw->eth_stats_base.ierrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);

	hw->eth_stats_base.oerrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);

	/* RX ring mbuf allocation failures */
	dev->data->rx_mbuf_alloc_failed = 0;

	hw->eth_stats_base.imissed =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
}
static void
nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = hw->mtu;
	/* Next should change when PF support is implemented */
	dev_info->max_mac_addrs = 1;

	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
					     DEV_RX_OFFLOAD_UDP_CKSUM |
					     DEV_RX_OFFLOAD_TCP_CKSUM;

	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;

	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
					     DEV_TX_OFFLOAD_UDP_CKSUM |
					     DEV_TX_OFFLOAD_TCP_CKSUM;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = DEFAULT_RX_PTHRESH,
			.hthresh = DEFAULT_RX_HTHRESH,
			.wthresh = DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = DEFAULT_TX_PTHRESH,
			.hthresh = DEFAULT_TX_HTHRESH,
			.wthresh = DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
	dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;

	/* speed_capa must use the ETH_LINK_SPEED_* capability flags, not
	 * the ETH_SPEED_NUM_* Mbps values the original mixed in. */
	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;

	if (hw->cap & NFP_NET_CFG_CTRL_LSO)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}
static const uint32_t *
nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to nfp_net_set_hash() */
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_MASK,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == nfp_net_recv_pkts)
		return ptypes;
	return NULL;
}
static uint32_t
nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct nfp_net_rxq *rxq;
	struct nfp_net_rx_desc *rxds;
	uint32_t idx;
	uint32_t count;

	rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];

	idx = rxq->rd_p;

	count = 0;

	/*
	 * Other PMDs are just checking the DD bit in intervals of 4
	 * descriptors and counting all four if the first has the DD
	 * bit on. Of course, this is not accurate but can be good for
	 * performance. But ideally that should be done in descriptor
	 * chunks belonging to the same cache line.
	 */

	while (count < rxq->rx_count) {
		rxds = &rxq->rxds[idx];
		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
			break;

		count++;
		idx++;

		/* Wrapping? */
		if ((idx) == rxq->rx_count)
			idx = 0;
	}

	return count;
}
static int
nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	int base = 0;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
		base = 1;

	/* Make sure all updates are written before un-masking */
	rte_wmb();
	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
		      NFP_NET_CFG_ICR_UNMASKED);
	return 0;
}

static int
nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	int base = 0;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
		base = 1;

	/* Make sure all updates are written before masking */
	rte_wmb();
	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
	return 0;
}
static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	nfp_net_dev_atomic_read_link_status(dev, &link);
	if (link.link_status)
		RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
			(int)(dev->data->port_id), (unsigned)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX
			? "full-duplex" : "half-duplex");
	else
		RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
			(int)(dev->data->port_id));

	/* PCI addresses are conventionally printed in hex */
	RTE_LOG(INFO, PMD, "PCI Address: %04x:%02x:%02x.%x\n",
		pci_dev->addr.domain, pci_dev->addr.bus,
		pci_dev->addr.devid, pci_dev->addr.function);
}
/* Interrupt configuration and handling */

/*
 * nfp_net_irq_unmask - Unmask an interrupt
 *
 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
 * clear the ICR for the entry.
 */
static void
nfp_net_irq_unmask(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
		/* If MSI-X auto-masking is used, clear the entry */
		rte_wmb();
		rte_intr_enable(&pci_dev->intr_handle);
	} else {
		/* Make sure all updates are written before un-masking */
		rte_wmb();
		nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
			      NFP_NET_CFG_ICR_UNMASKED);
	}
}
static void
nfp_net_dev_interrupt_handler(void *param)
{
	int64_t timeout;
	struct rte_eth_link link;
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n");

	/* get the link status */
	memset(&link, 0, sizeof(link));
	nfp_net_dev_atomic_read_link_status(dev, &link);

	nfp_net_link_update(dev, 0);

	/* likely to come up */
	if (!link.link_status) {
		/* handle it 1 sec later, waiting for it to stabilize */
		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
	/* likely to go down */
	} else {
		/* handle it 4 sec later, waiting for it to stabilize */
		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
	}

	if (rte_eal_alarm_set(timeout * 1000,
			      nfp_net_dev_interrupt_delayed_handler,
			      (void *)dev) < 0) {
		RTE_LOG(ERR, PMD, "Error setting alarm");
		/* Unmasking */
		nfp_net_irq_unmask(dev);
	}
}
/*
 * Interrupt handler registered as an alarm callback for delayed handling of
 * a specific interrupt, waiting for the NIC to reach a stable state. As the
 * NFP interrupt state is not stable right after the link goes down, it needs
 * to wait up to 4 seconds before the status can be trusted.
 *
 * @param param    The address of parameter (struct rte_eth_dev *)
 *
 * @return void
 */
static void
nfp_net_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	nfp_net_link_update(dev, 0);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);

	nfp_net_dev_link_status_print(dev);

	/* Unmasking */
	nfp_net_irq_unmask(dev);
}
static int
nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
		return -EINVAL;

	/* switch to jumbo mode if needed */
	if ((uint32_t)mtu > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;

	/* writing to configuration space */
	nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);

	hw->mtu = mtu;

	return 0;
}
static int
nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx, uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	const struct rte_memzone *tz;
	struct nfp_net_rxq *rxq;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Validating number of descriptors */
	if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
	    (nb_desc > NFP_NET_MAX_RX_DESC) ||
	    (nb_desc < NFP_NET_MIN_RX_DESC)) {
		RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
		return -EINVAL;
	}

	/*
	 * Free memory prior to re-allocation if needed. This is the case after
	 * calling nfp_net_stop.
	 */
	if (dev->data->rx_queues[queue_idx]) {
		nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* Allocating rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	/* Hw queues mapping based on firmware configuration */
	rxq->qidx = queue_idx;
	rxq->fl_qcidx = queue_idx * hw->stride_rx;
	rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
	rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
	rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);

	/*
	 * Tracking mbuf size for detecting a potential mbuf overflow due to
	 * RX offset.
	 */
	rxq->mem_pool = mp;
	rxq->mbuf_size = rxq->mem_pool->elt_size;
	rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
	hw->flbufsz = rxq->mbuf_size;

	rxq->rx_count = nb_desc;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
				  : ETHER_CRC_LEN);
	rxq->drop_en = rx_conf->rx_drop_en;

	/*
	 * Allocate RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      sizeof(struct nfp_net_rx_desc) *
				      NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
				      socket_id);

	if (tz == NULL) {
		RTE_LOG(ERR, PMD, "Error allocating rx dma\n");
		nfp_net_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Saving physical and virtual addresses for the RX ring */
	rxq->dma = (uint64_t)tz->phys_addr;
	rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;

	/* mbuf pointers array for referencing mbufs linked to RX descriptors */
	rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
					 sizeof(*rxq->rxbufs) * nb_desc,
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->rxbufs == NULL) {
		nfp_net_rx_queue_release(rxq);
		return -ENOMEM;
	}

	PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
		   rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);

	nfp_net_reset_rx_queue(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	rxq->hw = hw;

	/*
	 * Telling the HW about the physical address of the RX ring and number
	 * of descriptors in log2 format.
	 */
	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
	nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));

	return 0;
}
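/*
 * Example for the nb_desc check above (assuming the 16-byte RX descriptor
 * layout implied by rxds->vals[0]/vals[1] in the RX path): with
 * sizeof(struct nfp_net_rx_desc) == 16, (nb_desc * 16) % 128 == 0 means
 * the ring size must be a multiple of 8 descriptors.
 */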
static int
nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
{
	struct nfp_net_rx_buff *rxe = rxq->rxbufs;
	uint64_t dma_addr;
	unsigned i;

	PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
		   rxq->rx_count);

	for (i = 0; i < rxq->rx_count; i++) {
		struct nfp_net_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);

		if (mbuf == NULL) {
			RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned)rxq->qidx);
			return -ENOMEM;
		}

		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));

		rxd = &rxq->rxds[i];
		rxd->fld.dd = 0;
		rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
		rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
		rxe[i].mbuf = mbuf;
		PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
	}

	/* Make sure all writes are flushed before telling the hardware */
	rte_wmb();

	/* Not advertising the whole ring as the firmware gets confused if so */
	PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n",
		   rxq->rx_count - 1);

	nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);

	return 0;
}
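/*
 * Example: with a 512-descriptor ring the freelist pointer above only
 * advances by 511. Holding one slot back is a common way of keeping a
 * full ring distinguishable from an empty one; the comment above the
 * final nfp_qcp_ptr_add() call suggests the firmware requires this.
 */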
static int
nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct nfp_net_txq *txq;
	uint16_t tx_free_thresh;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Validating number of descriptors */
	if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
	    (nb_desc > NFP_NET_MAX_TX_DESC) ||
	    (nb_desc < NFP_NET_MIN_TX_DESC)) {
		RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
		return -EINVAL;
	}

	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc)) {
		RTE_LOG(ERR, PMD,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)\n", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}

	/*
	 * Free memory prior to re-allocation if needed. This is the case after
	 * calling nfp_net_stop.
	 */
	if (dev->data->tx_queues[queue_idx]) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n",
			   queue_idx);
		nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		RTE_LOG(ERR, PMD, "Error allocating tx queue\n");
		return -ENOMEM;
	}

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      sizeof(struct nfp_net_tx_desc) *
				      NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
				      socket_id);
	if (tz == NULL) {
		RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
		nfp_net_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->tx_count = nb_desc;
	txq->tx_free_thresh = tx_free_thresh;
	txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
	txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
	txq->tx_wthresh = tx_conf->tx_thresh.wthresh;

	/* queue mapping based on firmware configuration */
	txq->qidx = queue_idx;
	txq->tx_qcidx = queue_idx * hw->stride_tx;
	txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);

	txq->port_id = dev->data->port_id;
	txq->txq_flags = tx_conf->txq_flags;

	/* Saving physical and virtual addresses for the TX ring */
	txq->dma = (uint64_t)tz->phys_addr;
	txq->txds = (struct nfp_net_tx_desc *)tz->addr;

	/* mbuf pointers array for referencing mbufs linked to TX descriptors */
	txq->txbufs = rte_zmalloc_socket("txq->txbufs",
					 sizeof(*txq->txbufs) * nb_desc,
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->txbufs == NULL) {
		nfp_net_tx_queue_release(txq);
		return -ENOMEM;
	}
	PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
		   txq->txbufs, txq->txds, (unsigned long int)txq->dma);

	nfp_net_reset_tx_queue(txq);

	dev->data->tx_queues[queue_idx] = txq;
	txq->hw = hw;

	/*
	 * Telling the HW about the physical address of the TX ring and number
	 * of descriptors in log2 format.
	 */
	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));

	return 0;
}
/* nfp_net_tx_tso - Set TX descriptor for TSO */
static inline void
nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
	       struct rte_mbuf *mb)
{
	uint64_t ol_flags;
	struct nfp_net_hw *hw = txq->hw;

	if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
		goto clean_txd;

	ol_flags = mb->ol_flags;

	if (!(ol_flags & PKT_TX_TCP_SEG))
		goto clean_txd;

	txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
	txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
	txd->flags = PCIE_DESC_TX_LSO;
	return;

clean_txd:
	txd->flags = 0;
	txd->l4_offset = 0;
	txd->lso = 0;
}
/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
static inline void
nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
		 struct rte_mbuf *mb)
{
	uint64_t ol_flags;
	struct nfp_net_hw *hw = txq->hw;

	if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
		return;

	ol_flags = mb->ol_flags;

	/* IPv6 does not need checksum */
	if (ol_flags & PKT_TX_IP_CKSUM)
		txd->flags |= PCIE_DESC_TX_IP4_CSUM;

	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_UDP_CKSUM:
		txd->flags |= PCIE_DESC_TX_UDP_CSUM;
		break;
	case PKT_TX_TCP_CKSUM:
		txd->flags |= PCIE_DESC_TX_TCP_CSUM;
		break;
	}

	if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
		txd->flags |= PCIE_DESC_TX_CSUM;
}
/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
static inline void
nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
		 struct rte_mbuf *mb)
{
	struct nfp_net_hw *hw = rxq->hw;

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
		return;

	/* If IPv4 and IP checksum error, fail */
	if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
	    !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
		mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	/* If neither UDP nor TCP return */
	if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
	    !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
		return;

	if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
	    !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
		mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
	    !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
		mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
#define NFP_HASH_OFFSET      ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
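/*
 * Layout assumed by the two offsets above: the firmware prepends 8 bytes
 * of big-endian RSS metadata immediately before the packet data:
 *
 *	... | hash_type (4 bytes) | hash (4 bytes) | packet data
 *	      data_off - 8          data_off - 4     data_off
 */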
/*
 * nfp_net_set_hash - Set mbuf hash data
 *
 * The RSS hash and hash-type are prepended to the packet data.
 * Extract and decode it and set the mbuf fields.
 */
static inline void
nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
		 struct rte_mbuf *mbuf)
{
	uint32_t hash;
	uint32_t hash_type;
	struct nfp_net_hw *hw = rxq->hw;

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
		return;

	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
		return;

	hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
	hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);

	mbuf->hash.rss = hash;
	mbuf->ol_flags |= PKT_RX_RSS_HASH;

	switch (hash_type) {
	case NFP_NET_RSS_IPV4:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
		break;
	case NFP_NET_RSS_IPV6:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
		break;
	case NFP_NET_RSS_IPV6_EX:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	default:
		mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
	}
}
static inline void
nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
{
	rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
}

#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
/*
 * RX path design:
 *
 * There are some decisions to take:
 * 1) How to check the DD bit in the RX descriptors
 * 2) How and when to allocate new mbufs
 *
 * The current implementation checks just one single DD bit each loop. As each
 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
 * a single cache line instead. Tests with this change have not shown any
 * performance improvement but it requires further investigation. For example,
 * depending on which descriptor is next, the number of descriptors could be
 * less than 8 for just checking those in the same cache line. This implies
 * extra work which could be counterproductive by itself. Indeed, the latest
 * firmware changes are already doing this: writing several descriptors with
 * the DD bit set to save PCIe bandwidth and DMA operations from the NFP.
 *
 * Mbuf allocation is done when a new packet is received. The descriptor is
 * then automatically linked with the new mbuf and the old one is handed to
 * the user. The main drawback with this design is that mbuf allocation is
 * heavier than using the bulk allocations allowed by DPDK with
 * rte_mempool_get_bulk. From the cache point of view, allocating the mbuf
 * early as we do now does not seem to bring any benefit at all. Again, tests
 * with this change have not shown any improvement. Also, rte_mempool_get_bulk
 * returns all or nothing, so the implications of this type of allocation
 * should be studied more deeply.
 */
static uint16_t
nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct nfp_net_rxq *rxq;
	struct nfp_net_rx_desc *rxds;
	struct nfp_net_rx_buff *rxb;
	struct nfp_net_hw *hw;
	struct rte_mbuf *mb;
	struct rte_mbuf *new_mb;
	uint16_t nb_hold;
	uint64_t dma_addr;
	int avail;

	rxq = rx_queue;

	if (unlikely(rxq == NULL)) {
		/*
		 * DPDK just checks the queue is lower than max queues
		 * enabled. But the queue needs to be configured.
		 * Return 0 here: a negative errno would be misread as a
		 * huge packet count through the uint16_t return type.
		 */
		RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
		return 0;
	}

	hw = rxq->hw;
	avail = 0;
	nb_hold = 0;

	while (avail < nb_pkts) {
		rxb = &rxq->rxbufs[rxq->rd_p];
		if (unlikely(rxb == NULL)) {
			RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
			break;
		}

		/*
		 * Memory barrier to ensure that we won't do other
		 * reads before the DD bit.
		 */
		rte_rmb();

		rxds = &rxq->rxds[rxq->rd_p];
		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
			break;

		/*
		 * We got a packet. Let's alloc a new mbuf for refilling the
		 * free descriptor ring as soon as possible.
		 */
		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
		if (unlikely(new_mb == NULL)) {
			RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u\n", (unsigned)rxq->port_id,
				   (unsigned)rxq->qidx);
			nfp_net_mbuf_alloc_failed(rxq);
			break;
		}

		nb_hold++;

		/*
		 * Grab the mbuf and refill the descriptor with the
		 * previously allocated mbuf.
		 */
		mb = rxb->mbuf;
		rxb->mbuf = new_mb;

		PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n",
			   rxds->rxd.data_len, rxq->mbuf_size);

		/* Size of this segment */
		mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
		/* Size of the whole packet. We just support 1 segment */
		mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);

		if (unlikely((mb->data_len + hw->rx_offset) >
			     rxq->mbuf_size)) {
			/*
			 * This should not happen and the user has the
			 * responsibility of avoiding it. But we have
			 * to give some info about the error.
			 */
			RTE_LOG_DP(ERR, PMD,
				"mbuf overflow likely due to the RX offset.\n"
				"\t\tYour mbuf size should have extra space for"
				" RX offset=%u bytes.\n"
				"\t\tCurrently you just have %u bytes available"
				" but the received packet is %u bytes long",
				hw->rx_offset,
				rxq->mbuf_size - hw->rx_offset,
				mb->data_len);
			return -EINVAL;
		}

		/* Filling the received mbuf with packet info */
		if (hw->rx_offset)
			mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
		else
			mb->data_off = RTE_PKTMBUF_HEADROOM +
				       NFP_DESC_META_LEN(rxds);

		/* No scatter mode supported */
		mb->nb_segs = 1;
		mb->next = NULL;

		/* Checking the RSS flag */
		nfp_net_set_hash(rxq, rxds, mb);

		/* Checking the checksum flag */
		nfp_net_rx_cksum(rxq, rxds, mb);

		if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
		    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
			mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
			mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
		}

		/* Adding the mbuf to the mbuf array passed by the app */
		rx_pkts[avail++] = mb;

		/* Now resetting and updating the descriptor */
		rxds->vals[0] = 0;
		rxds->vals[1] = 0;
		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
		rxds->fld.dd = 0;
		rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
		rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;

		rxq->rd_p++;
		if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
			rxq->rd_p = 0;
	}

	if (nb_hold == 0)
		return nb_hold;

	PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
		   (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold);

	nb_hold += rxq->nb_rx_hold;

	/*
	 * FL descriptors need to be written before incrementing the
	 * FL queue WR pointer.
	 */
	rte_wmb();
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
			   (unsigned)rxq->port_id, (unsigned)rxq->qidx,
			   (unsigned)nb_hold, (unsigned)avail);
		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;

	return avail;
}
/*
 * nfp_net_tx_free_bufs - Check for descriptors with a complete
 * status
 * @txq: TX queue to work with
 * Returns number of descriptors freed
 */
static int
nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
{
	uint32_t qcp_rd_p;
	int todo;

	PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
		   " status\n", txq->qidx);

	/* Work out how many packets have been sent */
	qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);

	if (qcp_rd_p == txq->rd_p) {
		PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
			   "packets (%u, %u)\n", txq->qidx,
			   qcp_rd_p, txq->rd_p);
		return 0;
	}

	if (qcp_rd_p > txq->rd_p)
		todo = qcp_rd_p - txq->rd_p;
	else
		todo = qcp_rd_p + txq->tx_count - txq->rd_p;

	PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n",
		   qcp_rd_p, txq->rd_p, txq->rd_p);

	if (todo == 0)
		return todo;

	txq->rd_p += todo;
	if (unlikely(txq->rd_p >= txq->tx_count))
		txq->rd_p -= txq->tx_count;

	return todo;
}
/* Always leave some free descriptors to avoid wrapping confusion */
static inline
uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
{
	if (txq->wr_p >= txq->rd_p)
		return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
	else
		return txq->rd_p - txq->wr_p - 8;
}
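/*
 * Worked example (illustrative): with tx_count = 256, wr_p = 100 and
 * rd_p = 40, nfp_free_tx_desc() returns 256 - (100 - 40) - 8 = 188. The 8
 * descriptors held back keep a completely full ring from looking empty
 * once the write pointer wraps around to the read pointer.
 */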
/*
 * nfp_net_txq_full - Check if the TX queue free descriptors
 * is below tx_free_threshold
 *
 * @txq: TX queue to check
 *
 * This function uses the host copy of the read/write pointers.
 */
static inline
uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
{
	return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
}
static uint16_t
nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct nfp_net_txq *txq;
	struct nfp_net_hw *hw;
	struct nfp_net_tx_desc *txds, txd;
	struct rte_mbuf *pkt;
	uint64_t dma_addr;
	int pkt_size, dma_size;
	uint16_t free_descs, issued_descs;
	struct rte_mbuf **lmbuf;
	int i;

	txq = tx_queue;
	hw = txq->hw;
	txds = &txq->txds[txq->wr_p];

	PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
		   txq->qidx, txq->wr_p, nb_pkts);

	if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
		nfp_net_tx_free_bufs(txq);

	free_descs = (uint16_t)nfp_free_tx_desc(txq);
	if (unlikely(free_descs == 0))
		return 0;

	pkt = *tx_pkts;

	i = 0;
	issued_descs = 0;
	PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n",
		   txq->qidx, nb_pkts);
	/* Sending packets */
	while ((i < nb_pkts) && free_descs) {
		/* Grabbing the mbuf linked to the current descriptor */
		lmbuf = &txq->txbufs[txq->wr_p].mbuf;
		/* Warming the cache for releasing the mbuf later on */
		RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);

		pkt = *(tx_pkts + i);

		if (unlikely((pkt->nb_segs > 1) &&
			     !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
			PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
			rte_panic("Multisegment packet unsupported\n");
		}

		/* Checking if we have enough descriptors */
		if (unlikely(pkt->nb_segs > free_descs))
			goto xmit_end;

		/*
		 * Checksum and VLAN flags just in the first descriptor for a
		 * multisegment packet, but TSO info needs to be in all of them.
		 */
		txd.data_len = pkt->pkt_len;
		nfp_net_tx_tso(txq, &txd, pkt);
		nfp_net_tx_cksum(txq, &txd, pkt);

		if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
		    (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
			txd.flags |= PCIE_DESC_TX_VLAN;
			txd.vlan = pkt->vlan_tci;
		}

		/*
		 * mbuf data_len is the data in one segment and pkt_len data
		 * in the whole packet. When the packet is just one segment,
		 * then data_len = pkt_len.
		 */
		pkt_size = pkt->pkt_len;

		while (pkt) {
			/* Copying TSO, VLAN and cksum info */
			*txds = txd;

			/* Releasing mbuf used by this descriptor previously */
			if (*lmbuf)
				rte_pktmbuf_free_seg(*lmbuf);

			/*
			 * Linking mbuf with descriptor for being released
			 * next time descriptor is used.
			 */
			*lmbuf = pkt;

			dma_size = pkt->data_len;
			dma_addr = rte_mbuf_data_dma_addr(pkt);
			PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
				   "%" PRIx64 "\n", dma_addr);

			/* Filling descriptors fields */
			txds->dma_len = dma_size;
			txds->data_len = txd.data_len;
			txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
			txds->dma_addr_lo = (dma_addr & 0xffffffff);
			ASSERT(free_descs > 0);
			free_descs--;

			txq->wr_p++;
			if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
				txq->wr_p = 0;

			pkt_size -= dma_size;
			if (!pkt_size)
				/* End of packet */
				txds->offset_eop |= PCIE_DESC_TX_EOP;
			else
				txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;

			pkt = pkt->next;
			/* Referencing next free TX descriptor */
			txds = &txq->txds[txq->wr_p];
			lmbuf = &txq->txbufs[txq->wr_p].mbuf;
			issued_descs++;
		}
		i++;
	}

xmit_end:
	/* Increment write pointers. Force memory write before we let HW know */
	rte_wmb();
	nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);

	return i;
}
static void nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	uint32_t new_ctrl, update;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	new_ctrl = 0;

	if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
	    (mask & ETH_VLAN_EXTEND_OFFLOAD))
		RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
			" ETH_VLAN_EXTEND_OFFLOAD");

	/* Enable vlan strip if it is not configured yet */
	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;

	/* Disable vlan strip just if it is configured */
	if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;

	if (new_ctrl == 0)
		return;

	update = NFP_NET_CFG_UPDATE_GEN;

	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}
/* Update Redirection Table (RETA) of Receive Side Scaling of Ethernet device */
static int
nfp_net_reta_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	uint32_t reta, mask;
	int i, j;
	int idx, shift;
	uint32_t update;
	struct nfp_net_hw *hw =
		NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
		return -EINVAL;

	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
		RTE_LOG(ERR, PMD, "The size of the hash lookup table "
			"configured (%d) doesn't match the size supported by "
			"the hardware (%d)\n",
			reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
		return -EINVAL;
	}

	/*
	 * Update Redirection Table. There are 128 8-bit entries which can be
	 * managed as 32 32-bit entries.
	 */
	for (i = 0; i < reta_size; i += 4) {
		/* Handling 4 RSS entries per loop */
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);

		if (!mask)
			continue;

		reta = 0;
		/* If all 4 entries were set, don't need read RETA register */
		if (mask != 0xF)
			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);

		for (j = 0; j < 4; j++) {
			if (!(mask & (0x1 << j)))
				continue;
			if (mask != 0xF)
				/* Clearing the entry bits */
				reta &= ~(0xFF << (8 * j));
			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
		}
		/* Write back at the same byte offset the word was read from */
		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + i, reta);
	}

	update = NFP_NET_CFG_UPDATE_RSS;

	if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
		return -EIO;

	return 0;
}
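/*
 * RETA packing example (illustrative): the 128 one-byte table entries
 * live in 32 little-endian 32-bit config words, so entry i sits in word
 * i / 4 at bit offset 8 * (i % 4); e.g. entry 6 is byte 2 of the second
 * word.
 */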
/* Query Redirection Table (RETA) of Receive Side Scaling of an Ethernet device */
static int
nfp_net_reta_query(struct rte_eth_dev *dev,
                   struct rte_eth_rss_reta_entry64 *reta_conf,
                   uint16_t reta_size)
{
        uint8_t i, j, mask;
        int idx, shift;
        uint32_t reta;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
                return -EINVAL;

        if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
                RTE_LOG(ERR, PMD, "The size of the hash lookup table configured "
                        "(%d) does not match the number supported by hardware "
                        "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
                return -EINVAL;
        }

        /*
         * Read the Redirection Table. There are 128 8-bit entries which can
         * be managed as 32 32-bit entries.
         */
        for (i = 0; i < reta_size; i += 4) {
                /* Handling 4 RSS entries per loop iteration */
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);

                if (!mask)
                        continue;

                reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
                for (j = 0; j < 4; j++) {
                        if (!(mask & (0x1 << j)))
                                continue;
                        /* Each entry is a full byte, so mask with 0xff */
                        reta_conf[idx].reta[shift + j] =
                                (uint8_t)((reta >> (8 * j)) & 0xff);
                }
        }
        return 0;
}
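/*
 * Usage sketch (illustrative): the query side is reached through
 * rte_eth_dev_rss_reta_query(port_id, reta_conf, reta_size), where
 * reta_conf[n].mask selects which of the 64 entries per group the
 * caller wants filled in.
 */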
static int
nfp_net_rss_hash_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_conf *rss_conf)
{
        uint32_t update;
        uint32_t cfg_rss_ctrl = 0;
        uint8_t key;
        uint64_t rss_hf;
        int i;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        rss_hf = rss_conf->rss_hf;

        /* Checking if RSS is enabled */
        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
                if (rss_hf != 0) { /* Enable RSS? */
                        RTE_LOG(ERR, PMD, "RSS unsupported\n");
                        return -EINVAL;
                }
                return 0; /* Nothing to do */
        }

        if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
                RTE_LOG(ERR, PMD, "hash key too long\n");
                return -EINVAL;
        }

        if (rss_hf & ETH_RSS_IPV4)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
                                NFP_NET_CFG_RSS_IPV4_TCP |
                                NFP_NET_CFG_RSS_IPV4_UDP;

        if (rss_hf & ETH_RSS_IPV6)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
                                NFP_NET_CFG_RSS_IPV6_TCP |
                                NFP_NET_CFG_RSS_IPV6_UDP;

        /* Configuring where to apply the RSS hash */
        nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);

        /* Writing the key byte by byte */
        for (i = 0; i < rss_conf->rss_key_len; i++) {
                memcpy(&key, &rss_conf->rss_key[i], 1);
                nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
        }

        /* Writing the key size */
        nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);

        update = NFP_NET_CFG_UPDATE_RSS;

        if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
                return -EIO;

        return 0;
}
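/*
 * Usage sketch (illustrative values): configuring a 40-byte hash key and
 * IPv4 hashing from an application would look roughly like
 *
 *      struct rte_eth_rss_conf conf = {
 *              .rss_key = key_bytes,   // 40-byte array, caller-provided
 *              .rss_key_len = 40,      // NFP_NET_CFG_RSS_KEY_SZ is 40
 *              .rss_hf = ETH_RSS_IPV4,
 *      };
 *      rte_eth_dev_rss_hash_update(port_id, &conf);
 *
 * which lands in the handler above via the .rss_hash_update op.
 */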
static int
nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
        uint64_t rss_hf;
        uint32_t cfg_rss_ctrl;
        uint8_t key;
        int i;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
                return -EINVAL;

        rss_hf = rss_conf->rss_hf;
        cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
                rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
                rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
                rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP;

        /* Propagating the current hash functions to the caller */
        rss_conf->rss_hf = rss_hf;

        /* Reading the key size */
        rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);

        /* Reading the key byte by byte */
        for (i = 0; i < rss_conf->rss_key_len; i++) {
                key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
                memcpy(&rss_conf->rss_key[i], &key, 1);
        }

        return 0;
}
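/*
 * Note: the coarse NFP_NET_CFG_RSS_IPV4/IPV6 bits are reported back as
 * the corresponding non-fragment TCP/UDP flow types, mirroring what
 * nfp_net_rss_hash_update() programs; an application retrieves them with
 * rte_eth_dev_rss_hash_conf_get().
 */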
/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
        .dev_configure = nfp_net_configure,
        .dev_start = nfp_net_start,
        .dev_stop = nfp_net_stop,
        .dev_close = nfp_net_close,
        .promiscuous_enable = nfp_net_promisc_enable,
        .promiscuous_disable = nfp_net_promisc_disable,
        .link_update = nfp_net_link_update,
        .stats_get = nfp_net_stats_get,
        .stats_reset = nfp_net_stats_reset,
        .dev_infos_get = nfp_net_infos_get,
        .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
        .mtu_set = nfp_net_dev_mtu_set,
        .vlan_offload_set = nfp_net_vlan_offload_set,
        .reta_update = nfp_net_reta_update,
        .reta_query = nfp_net_reta_query,
        .rss_hash_update = nfp_net_rss_hash_update,
        .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
        .rx_queue_setup = nfp_net_rx_queue_setup,
        .rx_queue_release = nfp_net_rx_queue_release,
        .rx_queue_count = nfp_net_rx_queue_count,
        .tx_queue_setup = nfp_net_tx_queue_setup,
        .tx_queue_release = nfp_net_tx_queue_release,
        .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
        .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};
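/*
 * Dispatch note (illustrative): generic ethdev calls are routed through
 * this table, e.g. rte_eth_dev_configure() ends up in .dev_configure and
 * rte_eth_rx_queue_setup() in .rx_queue_setup, so the functions above are
 * never called directly by applications.
 */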
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_hw *hw;

        uint32_t tx_bar_off, rx_bar_off;
        uint32_t start_q;
        int stride = 4;

        PMD_INIT_FUNC_TRACE();

        hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        eth_dev->dev_ops = &nfp_net_eth_dev_ops;
        eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
        eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

        PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
                     pci_dev->id.vendor_id, pci_dev->id.device_id,
                     pci_dev->addr.domain, pci_dev->addr.bus,
                     pci_dev->addr.devid, pci_dev->addr.function);

        hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
        if (hw->ctrl_bar == NULL) {
                RTE_LOG(ERR, PMD,
                        "hw->ctrl_bar is NULL. BAR0 not configured\n");
                return -ENODEV;
        }

        hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
        hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

        /* Work out where in the BAR the queues start. */
        switch (pci_dev->id.device_id) {
        case PCI_DEVICE_ID_NFP6000_VF_NIC:
                start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
                tx_bar_off = NFP_PCIE_QUEUE(start_q);
                start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
                rx_bar_off = NFP_PCIE_QUEUE(start_q);
                break;
        default:
                RTE_LOG(ERR, PMD, "nfp_net: no matching device ID\n");
                return -ENODEV;
        }

        PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x", tx_bar_off);
        PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x", rx_bar_off);

        hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
        hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

        PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
                     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

        nfp_net_cfg_queue_setup(hw);

        /* Get some of the read-only fields from the config BAR */
        hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
        hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
        hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
        hw->mtu = hw->max_mtu;

        if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
                hw->rx_offset = NFP_NET_RX_OFFSET;
        else
                hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

        PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
                     hw->ver, hw->max_mtu);
        PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
                     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
                     hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
                     hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
                     hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
                     hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
                     hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
                     hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
                     hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
                     hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");

        hw->ctrl = 0;

        hw->stride_rx = stride;
        hw->stride_tx = stride;

        PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
                     hw->max_rx_queues, hw->max_tx_queues);

        /* Initializing spinlock for reconfigs */
        rte_spinlock_init(&hw->reconfig_lock);

        /* Allocating memory for the MAC address */
        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
                return -ENOMEM;
        }

        nfp_net_read_mac(hw);

        if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr))
                /* Using random mac addresses for VFs */
                eth_random_addr(&hw->mac_addr[0]);

        /* Copying the mac address to the DPDK eth_dev struct */
        ether_addr_copy((struct ether_addr *)hw->mac_addr,
                        &eth_dev->data->mac_addrs[0]);

        PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
                     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id,
                     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
                     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

        /* Registering LSC interrupt handler */
        rte_intr_callback_register(&pci_dev->intr_handle,
                                   nfp_net_dev_interrupt_handler,
                                   (void *)eth_dev);

        /* Telling the firmware about the LSC interrupt entry */
        nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);

        /* Recording current stats counters values */
        nfp_net_stats_reset(eth_dev);

        return 0;
}
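/*
 * Probe-time flow (summary): nfp_net_init() runs once per vNIC when the
 * PCI device is probed below. After it returns, the port behaves like any
 * other ethdev; a minimal application sequence (illustrative) is
 * rte_eal_init() -> rte_eth_dev_configure() -> rte_eth_rx_queue_setup()/
 * rte_eth_tx_queue_setup() -> rte_eth_dev_start().
 */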
static const struct rte_pci_id pci_id_nfp_net_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
                         PCI_DEVICE_ID_NFP6000_PF_NIC) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
                         PCI_DEVICE_ID_NFP6000_VF_NIC) },
        { .vendor_id = 0, }, /* sentinel */
};
static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                             struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct nfp_net_adapter), nfp_net_init);
}

static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
static struct rte_pci_driver rte_nfp_net_pmd = {
        .id_table = pci_id_nfp_net_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_nfp_pci_probe,
        .remove = eth_nfp_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio-pci");
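/*
 * Deployment note (illustrative): before this PMD can claim the device,
 * the NFP VF must be bound to one of the kernel modules declared above,
 * e.g. with DPDK's usertools/dpdk-devbind.py:
 *
 *      dpdk-devbind.py --bind=vfio-pci 0000:03:08.0
 *
 * The PCI address shown is an example value, not a fixed one.
 */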
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */