2 * Copyright (c) 2014-2018 Netronome Systems, Inc.
5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
35 * vim:shiftwidth=8:noexpandtab
37 * @file dpdk/pmd/nfp_net.c
39 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
45 #include <rte_debug.h>
46 #include <rte_ethdev_driver.h>
47 #include <rte_ethdev_pci.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_memzone.h>
52 #include <rte_mempool.h>
53 #include <rte_version.h>
54 #include <rte_string_fns.h>
55 #include <rte_alarm.h>
56 #include <rte_spinlock.h>
58 #include "nfpcore/nfp_cpp.h"
59 #include "nfpcore/nfp_nffw.h"
60 #include "nfpcore/nfp_hwinfo.h"
61 #include "nfpcore/nfp_mip.h"
62 #include "nfpcore/nfp_rtsym.h"
63 #include "nfpcore/nfp_nsp.h"
65 #include "nfp_net_pmd.h"
66 #include "nfp_net_logs.h"
67 #include "nfp_net_ctrl.h"
70 static void nfp_net_close(struct rte_eth_dev *dev);
71 static int nfp_net_configure(struct rte_eth_dev *dev);
72 static void nfp_net_dev_interrupt_handler(void *param);
73 static void nfp_net_dev_interrupt_delayed_handler(void *param);
74 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
75 static void nfp_net_infos_get(struct rte_eth_dev *dev,
76 struct rte_eth_dev_info *dev_info);
77 static int nfp_net_init(struct rte_eth_dev *eth_dev);
78 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
79 static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
80 static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
81 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
82 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
84 static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
86 static void nfp_net_rx_queue_release(void *rxq);
87 static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
88 uint16_t nb_desc, unsigned int socket_id,
89 const struct rte_eth_rxconf *rx_conf,
90 struct rte_mempool *mp);
91 static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
92 static void nfp_net_tx_queue_release(void *txq);
93 static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
94 uint16_t nb_desc, unsigned int socket_id,
95 const struct rte_eth_txconf *tx_conf);
96 static int nfp_net_start(struct rte_eth_dev *dev);
97 static int nfp_net_stats_get(struct rte_eth_dev *dev,
98 struct rte_eth_stats *stats);
99 static void nfp_net_stats_reset(struct rte_eth_dev *dev);
100 static void nfp_net_stop(struct rte_eth_dev *dev);
101 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
104 static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
105 static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
106 struct rte_eth_rss_conf *rss_conf);
107 static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
108 struct rte_eth_rss_reta_entry64 *reta_conf,
110 static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
111 struct rte_eth_rss_conf *rss_conf);
113 /* The offset of the queue controller queues in the PCIe Target */
114 #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
116 /* Maximum value which can be added to a queue with one transaction */
117 #define NFP_QCP_MAX_ADD 0x7f
119 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
120 (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
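/*
 * Illustrative note (not from the original sources): RTE_MBUF_DMA_ADDR_DEFAULT
 * yields the IOVA the NFP must DMA into for a freshly allocated mbuf. A
 * descriptor refill therefore looks roughly like the snippet below, matching
 * the high/low address split used later in this file:
 *
 *	dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
 *	rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
 *	rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
 */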
122 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
124 NFP_QCP_READ_PTR = 0,
129 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
130 * @q: Base address for queue structure
131 * @ptr: Add to the Read or Write pointer
132 * @val: Value to add to the queue pointer
134 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
137 nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
141 if (ptr == NFP_QCP_READ_PTR)
142 off = NFP_QCP_QUEUE_ADD_RPTR;
144 off = NFP_QCP_QUEUE_ADD_WPTR;
146 while (val > NFP_QCP_MAX_ADD) {
147 nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
148 val -= NFP_QCP_MAX_ADD;
151 nn_writel(rte_cpu_to_le_32(val), q + off);
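/*
 * Worked example (illustrative, endian conversion omitted): with
 * NFP_QCP_MAX_ADD == 0x7f (127), adding 200 to the write pointer is split
 * into two transactions so no single write exceeds what the queue
 * controller accepts:
 *
 *	nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, 200);
 *		-> nn_writel(127, q + NFP_QCP_QUEUE_ADD_WPTR);
 *		-> nn_writel(73, q + NFP_QCP_QUEUE_ADD_WPTR);
 */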
155 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
156 * @q: Base address for queue structure
157 * @ptr: Read or Write pointer
159 static inline uint32_t
160 nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
165 if (ptr == NFP_QCP_READ_PTR)
166 off = NFP_QCP_QUEUE_STS_LO;
168 off = NFP_QCP_QUEUE_STS_HI;
170 val = rte_cpu_to_le_32(nn_readl(q + off));
172 if (ptr == NFP_QCP_READ_PTR)
173 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
175 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
179 * Functions to read/write from/to Config BAR
180 * Performs any endian conversion necessary.
182 static inline uint8_t
183 nn_cfg_readb(struct nfp_net_hw *hw, int off)
185 return nn_readb(hw->ctrl_bar + off);
189 nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
191 nn_writeb(val, hw->ctrl_bar + off);
194 static inline uint32_t
195 nn_cfg_readl(struct nfp_net_hw *hw, int off)
197 return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
201 nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
203 nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
206 static inline uint64_t
207 nn_cfg_readq(struct nfp_net_hw *hw, int off)
209 return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
213 nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
215 nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
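/*
 * Usage example (illustrative): the accessors above hide the endian
 * conversion, so callers read and write host-order values at the offsets
 * defined in nfp_net_ctrl.h, e.g.:
 *
 *	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
 *	sts = nn_cfg_readl(hw, NFP_NET_CFG_STS);
 */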
219 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
223 if (rxq->rxbufs == NULL)
226 for (i = 0; i < rxq->rx_count; i++) {
227 if (rxq->rxbufs[i].mbuf) {
228 rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
229 rxq->rxbufs[i].mbuf = NULL;
235 nfp_net_rx_queue_release(void *rx_queue)
237 struct nfp_net_rxq *rxq = rx_queue;
240 nfp_net_rx_queue_release_mbufs(rxq);
241 rte_free(rxq->rxbufs);
247 nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
249 nfp_net_rx_queue_release_mbufs(rxq);
255 nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
259 if (txq->txbufs == NULL)
262 for (i = 0; i < txq->tx_count; i++) {
263 if (txq->txbufs[i].mbuf) {
264 rte_pktmbuf_free(txq->txbufs[i].mbuf);
265 txq->txbufs[i].mbuf = NULL;
271 nfp_net_tx_queue_release(void *tx_queue)
273 struct nfp_net_txq *txq = tx_queue;
276 nfp_net_tx_queue_release_mbufs(txq);
277 rte_free(txq->txbufs);
283 nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
285 nfp_net_tx_queue_release_mbufs(txq);
291 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
295 struct timespec wait;
297 PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
300 if (hw->qcp_cfg == NULL)
301 rte_panic("Bad configuration queue pointer\n");
303 nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
306 wait.tv_nsec = 1000000;
308 PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");
310 /* Poll update field, waiting for NFP to ack the config */
311 for (cnt = 0; ; cnt++) {
312 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
315 if (new & NFP_NET_CFG_UPDATE_ERR) {
316 PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
319 if (cnt >= NFP_NET_POLL_TIMEOUT) {
320 PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
321 " %dms", update, cnt);
322 rte_panic("Exiting\n");
324 nanosleep(&wait, 0); /* wait 1 ms */
326 PMD_DRV_LOG(DEBUG, "Ack DONE\n");
331 * Reconfigure the NIC
332 * @nn: device to reconfigure
333 * @ctrl: The value for the ctrl field in the BAR config
334 * @update: The value for the update field in the BAR config
336 * Write the update word to the BAR and ping the reconfig queue. Then poll
337 * until the firmware has acknowledged the update by zeroing the update word.
340 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
344 PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
347 rte_spinlock_lock(&hw->reconfig_lock);
349 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
350 nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
354 err = __nfp_net_reconfig(hw, update);
356 rte_spinlock_unlock(&hw->reconfig_lock);
362 * Reconfig errors returned here imply situations the caller can handle.
363 * Otherwise, rte_panic is called inside __nfp_net_reconfig
365 PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
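/*
 * Typical usage (mirrors the promiscuous-mode path further below): compute a
 * new ctrl word, pick the matching update flag, and let nfp_net_reconfig()
 * write both and wait for the firmware ack:
 *
 *	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 *	update = NFP_NET_CFG_UPDATE_GEN;
 *	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
 *		return;
 *	hw->ctrl = new_ctrl;
 */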
371 * Configure an Ethernet device. This function must be invoked first
372 * before any other function in the Ethernet API. This function can
373 * also be re-invoked when a device is in the stopped state.
376 nfp_net_configure(struct rte_eth_dev *dev)
378 struct rte_eth_conf *dev_conf;
379 struct rte_eth_rxmode *rxmode;
380 struct rte_eth_txmode *txmode;
381 struct nfp_net_hw *hw;
383 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
386 * A DPDK app sends info about how many queues to use and how
387 * those queues need to be configured. This is used by the
388 * DPDK core and it makes sure no more queues than those
389 * advertised by the driver are requested. This function is
390 * called after that internal process
393 PMD_INIT_LOG(DEBUG, "Configure");
395 dev_conf = &dev->data->dev_conf;
396 rxmode = &dev_conf->rxmode;
397 txmode = &dev_conf->txmode;
399 /* Checking TX mode */
400 if (txmode->mq_mode) {
401 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
405 /* Checking RX mode */
406 if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
407 !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
408 PMD_INIT_LOG(INFO, "RSS not supported");
412 /* Checking RX offloads */
413 if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
414 PMD_INIT_LOG(INFO, "rxmode does not support split header");
418 if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
419 !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
420 PMD_INIT_LOG(INFO, "RXCSUM not supported");
422 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
423 PMD_INIT_LOG(INFO, "VLAN filter not supported");
427 if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
428 !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
429 PMD_INIT_LOG(INFO, "hw vlan strip not supported");
433 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
434 PMD_INIT_LOG(INFO, "VLAN extended not supported");
438 if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
439 PMD_INIT_LOG(INFO, "LRO not supported");
443 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
444 PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
448 if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
449 PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
453 if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
454 PMD_INIT_LOG(INFO, "MACSEC strip not supported");
463 if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
464 PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
466 if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
467 !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
468 PMD_INIT_LOG(INFO, "Scatter not supported");
472 if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
473 PMD_INIT_LOG(INFO, "timestamp offfload not supported");
477 if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
478 PMD_INIT_LOG(INFO, "security offload not supported");
482 /* checking TX offloads */
483 if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
484 !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
485 PMD_INIT_LOG(INFO, "vlan insert offload not supported");
489 if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
490 !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
491 PMD_INIT_LOG(INFO, "TX checksum offload not supported");
495 if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
496 PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
500 if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
501 !(hw->cap & NFP_NET_CFG_CTRL_LSO)) {
502 PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
506 if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
507 PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
511 if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
512 PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
516 if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
517 PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
521 if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
522 txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
523 txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
524 txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
525 PMD_INIT_LOG(INFO, "tunneling offload not supported");
529 if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
530 PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
534 if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
535 PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
539 if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
540 !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
541 PMD_INIT_LOG(INFO, "TX multisegs not supported");
545 if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
546 PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
550 if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
551 PMD_INIT_LOG(INFO, "TX security offload not supported");
559 nfp_net_enable_queues(struct rte_eth_dev *dev)
561 struct nfp_net_hw *hw;
562 uint64_t enabled_queues = 0;
565 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
567 /* Enabling the required TX queues in the device */
568 for (i = 0; i < dev->data->nb_tx_queues; i++)
569 enabled_queues |= (1 << i);
571 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
575 /* Enabling the required RX queues in the device */
576 for (i = 0; i < dev->data->nb_rx_queues; i++)
577 enabled_queues |= (1 << i);
579 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
583 nfp_net_disable_queues(struct rte_eth_dev *dev)
585 struct nfp_net_hw *hw;
586 uint32_t new_ctrl, update = 0;
588 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
590 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
591 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
593 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
594 update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
595 NFP_NET_CFG_UPDATE_MSIX;
597 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
598 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
600 /* If reconfig fails, avoid changing the hw state */
601 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
608 nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
612 for (i = 0; i < dev->data->nb_rx_queues; i++) {
613 if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
620 nfp_net_params_setup(struct nfp_net_hw *hw)
622 nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
623 nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
627 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
629 hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
632 #define ETH_ADDR_LEN 6
635 nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
639 for (i = 0; i < ETH_ADDR_LEN; i++)
644 nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
646 struct nfp_eth_table *nfp_eth_table;
648 nfp_eth_table = nfp_eth_read_ports(hw->cpp);
650 * hw points to port0 private data. We need hw now pointing to
651 * the right port's private data.
654 nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
655 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
662 nfp_net_vf_read_mac(struct nfp_net_hw *hw)
666 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
667 memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr));
669 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
670 memcpy(&hw->mac_addr[4], &tmp, 2);
674 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
676 uint32_t mac0 = *(uint32_t *)mac;
679 nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
682 mac1 = *(uint16_t *)mac;
683 nn_writew(rte_cpu_to_be_16(mac1),
684 hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
688 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
689 struct rte_intr_handle *intr_handle)
691 struct nfp_net_hw *hw;
694 if (!intr_handle->intr_vec) {
695 intr_handle->intr_vec =
696 rte_zmalloc("intr_vec",
697 dev->data->nb_rx_queues * sizeof(int), 0);
698 if (!intr_handle->intr_vec) {
699 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
700 " intr_vec", dev->data->nb_rx_queues);
705 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
707 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
708 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
709 /* UIO just supports one queue and no LSC*/
710 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
711 intr_handle->intr_vec[0] = 0;
713 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
714 for (i = 0; i < dev->data->nb_rx_queues; i++) {
716 * The first MSI-X vector is reserved for non-ring interrupts,
717 * so ring i gets vector i + 1.
719 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
720 intr_handle->intr_vec[i] = i + 1;
721 PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
722 intr_handle->intr_vec[i]);
726 /* Avoiding TX interrupts */
727 hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
732 nfp_check_offloads(struct rte_eth_dev *dev)
734 struct nfp_net_hw *hw;
735 struct rte_eth_conf *dev_conf;
736 struct rte_eth_rxmode *rxmode;
737 struct rte_eth_txmode *txmode;
740 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
742 dev_conf = &dev->data->dev_conf;
743 rxmode = &dev_conf->rxmode;
744 txmode = &dev_conf->txmode;
746 if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
747 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
748 ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
751 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
752 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
753 ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
756 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
757 hw->mtu = rxmode->max_rx_pkt_len;
759 if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
760 ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
763 if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
764 ctrl |= NFP_NET_CFG_CTRL_L2BC;
767 if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
768 ctrl |= NFP_NET_CFG_CTRL_L2MC;
770 /* TX checksum offload */
771 if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
772 txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
773 txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
774 ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
777 if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO)
778 ctrl |= NFP_NET_CFG_CTRL_LSO;
781 if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
782 ctrl |= NFP_NET_CFG_CTRL_GATHER;
788 nfp_net_start(struct rte_eth_dev *dev)
790 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
791 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
792 uint32_t new_ctrl, update = 0;
793 struct nfp_net_hw *hw;
794 struct rte_eth_conf *dev_conf;
795 struct rte_eth_rxmode *rxmode;
796 uint32_t intr_vector;
799 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
801 PMD_INIT_LOG(DEBUG, "Start");
803 /* Disabling queues just in case... */
804 nfp_net_disable_queues(dev);
806 /* Enabling the required queues in the device */
807 nfp_net_enable_queues(dev);
809 /* check and configure queue intr-vector mapping */
810 if (dev->data->dev_conf.intr_conf.rxq != 0) {
811 if (hw->pf_multiport_enabled) {
812 PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
813 "with NFP multiport PF");
816 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
818 * Better not to share LSC with RX interrupts.
819 * Unregistering LSC interrupt handler
821 rte_intr_callback_unregister(&pci_dev->intr_handle,
822 nfp_net_dev_interrupt_handler, (void *)dev);
824 if (dev->data->nb_rx_queues > 1) {
825 PMD_INIT_LOG(ERR, "PMD rx interrupt only "
826 "supports 1 queue with UIO");
830 intr_vector = dev->data->nb_rx_queues;
831 if (rte_intr_efd_enable(intr_handle, intr_vector))
834 nfp_configure_rx_interrupt(dev, intr_handle);
835 update = NFP_NET_CFG_UPDATE_MSIX;
838 rte_intr_enable(intr_handle);
840 new_ctrl = nfp_check_offloads(dev);
842 /* Writing configuration parameters in the device */
843 nfp_net_params_setup(hw);
845 dev_conf = &dev->data->dev_conf;
846 rxmode = &dev_conf->rxmode;
848 if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
849 nfp_net_rss_config_default(dev);
850 update |= NFP_NET_CFG_UPDATE_RSS;
851 new_ctrl |= NFP_NET_CFG_CTRL_RSS;
855 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
857 update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
859 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
860 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
862 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
863 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
867 * Allocating rte mbufs for configured rx queues.
868 * This requires queues being enabled beforehand.
870 if (nfp_net_rx_freelist_setup(dev) < 0) {
876 /* Configure the physical port up */
877 nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
885 * An error returned by this function should mean the app exits and
886 * the system then releases all the memory allocated, even memory
887 * coming from hugepages.
889 * The device could be enabled at this point with some queues
890 * ready for getting packets. This is true if the call to
891 * nfp_net_rx_freelist_setup() succeeds for some queues but
892 * fails for subsequent queues.
894 * This should make the app exit, but better if we tell the user.
897 nfp_net_disable_queues(dev);
902 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
904 nfp_net_stop(struct rte_eth_dev *dev)
907 struct nfp_net_hw *hw;
909 PMD_INIT_LOG(DEBUG, "Stop");
911 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
913 nfp_net_disable_queues(dev);
916 for (i = 0; i < dev->data->nb_tx_queues; i++) {
917 nfp_net_reset_tx_queue(
918 (struct nfp_net_txq *)dev->data->tx_queues[i]);
921 for (i = 0; i < dev->data->nb_rx_queues; i++) {
922 nfp_net_reset_rx_queue(
923 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
927 /* Configure the physical port down */
928 nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
931 /* Reset and stop device. The device cannot be restarted. */
933 nfp_net_close(struct rte_eth_dev *dev)
935 struct nfp_net_hw *hw;
936 struct rte_pci_device *pci_dev;
939 PMD_INIT_LOG(DEBUG, "Close");
941 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
942 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
945 * We assume that the DPDK application is stopping all the
946 * threads/queues before calling the device close function.
949 nfp_net_disable_queues(dev);
952 for (i = 0; i < dev->data->nb_tx_queues; i++) {
953 nfp_net_reset_tx_queue(
954 (struct nfp_net_txq *)dev->data->tx_queues[i]);
957 for (i = 0; i < dev->data->nb_rx_queues; i++) {
958 nfp_net_reset_rx_queue(
959 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
962 rte_intr_disable(&pci_dev->intr_handle);
963 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
965 /* unregister callback func from eal lib */
966 rte_intr_callback_unregister(&pci_dev->intr_handle,
967 nfp_net_dev_interrupt_handler,
971 * The ixgbe PMD driver disables the pcie master on the
972 * device. The i40e does not...
977 nfp_net_promisc_enable(struct rte_eth_dev *dev)
979 uint32_t new_ctrl, update = 0;
980 struct nfp_net_hw *hw;
982 PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");
984 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
986 if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
987 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
991 if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
992 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
996 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
997 update = NFP_NET_CFG_UPDATE_GEN;
1000 * DPDK sets promiscuous mode on just after this call, assuming
1001 * it cannot fail ...
1003 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
1006 hw->ctrl = new_ctrl;
1010 nfp_net_promisc_disable(struct rte_eth_dev *dev)
1012 uint32_t new_ctrl, update = 0;
1013 struct nfp_net_hw *hw;
1015 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1017 if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
1018 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
1022 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
1023 update = NFP_NET_CFG_UPDATE_GEN;
1026 * DPDK sets promiscuous mode off just before this call
1027 * assuming it cannot fail ...
1029 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
1032 hw->ctrl = new_ctrl;
1036 * return 0 means link status changed, -1 means not changed
1038 * Wait to complete is needed as it can take up to 9 seconds to get the Link
1039 * status.
1042 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
1044 struct nfp_net_hw *hw;
1045 struct rte_eth_link link;
1046 uint32_t nn_link_status;
1049 static const uint32_t ls_to_ethtool[] = {
1050 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
1051 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE,
1052 [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G,
1053 [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G,
1054 [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G,
1055 [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G,
1056 [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G,
1057 [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
1060 PMD_DRV_LOG(DEBUG, "Link update\n");
1062 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1064 nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
1066 memset(&link, 0, sizeof(struct rte_eth_link));
1068 if (nn_link_status & NFP_NET_CFG_STS_LINK)
1069 link.link_status = ETH_LINK_UP;
1071 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1073 nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
1074 NFP_NET_CFG_STS_LINK_RATE_MASK;
1076 if (nn_link_status >= RTE_DIM(ls_to_ethtool))
1077 link.link_speed = ETH_SPEED_NUM_NONE;
1079 link.link_speed = ls_to_ethtool[nn_link_status];
1081 ret = rte_eth_linkstatus_set(dev, &link);
1083 if (link.link_status)
1084 PMD_DRV_LOG(INFO, "NIC Link is Up\n");
1086 PMD_DRV_LOG(INFO, "NIC Link is Down\n");
1092 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1095 struct nfp_net_hw *hw;
1096 struct rte_eth_stats nfp_dev_stats;
1098 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1100 /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
1102 memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
1104 /* reading per RX ring stats */
1105 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1106 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1109 nfp_dev_stats.q_ipackets[i] =
1110 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1112 nfp_dev_stats.q_ipackets[i] -=
1113 hw->eth_stats_base.q_ipackets[i];
1115 nfp_dev_stats.q_ibytes[i] =
1116 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1118 nfp_dev_stats.q_ibytes[i] -=
1119 hw->eth_stats_base.q_ibytes[i];
1122 /* reading per TX ring stats */
1123 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1124 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1127 nfp_dev_stats.q_opackets[i] =
1128 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1130 nfp_dev_stats.q_opackets[i] -=
1131 hw->eth_stats_base.q_opackets[i];
1133 nfp_dev_stats.q_obytes[i] =
1134 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1136 nfp_dev_stats.q_obytes[i] -=
1137 hw->eth_stats_base.q_obytes[i];
1140 nfp_dev_stats.ipackets =
1141 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1143 nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
1145 nfp_dev_stats.ibytes =
1146 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1148 nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
1150 nfp_dev_stats.opackets =
1151 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1153 nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
1155 nfp_dev_stats.obytes =
1156 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1158 nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
1160 /* reading general device stats */
1161 nfp_dev_stats.ierrors =
1162 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1164 nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
1166 nfp_dev_stats.oerrors =
1167 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1169 nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
1171 /* RX ring mbuf allocation failures */
1172 nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1174 nfp_dev_stats.imissed =
1175 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1177 nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
1180 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
1187 nfp_net_stats_reset(struct rte_eth_dev *dev)
1190 struct nfp_net_hw *hw;
1192 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1195 * hw->eth_stats_base records the per counter starting point.
1196 * Let's update it now
1199 /* reading per RX ring stats */
1200 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1201 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1204 hw->eth_stats_base.q_ipackets[i] =
1205 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1207 hw->eth_stats_base.q_ibytes[i] =
1208 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1211 /* reading per TX ring stats */
1212 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1213 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1216 hw->eth_stats_base.q_opackets[i] =
1217 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1219 hw->eth_stats_base.q_obytes[i] =
1220 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1223 hw->eth_stats_base.ipackets =
1224 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1226 hw->eth_stats_base.ibytes =
1227 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1229 hw->eth_stats_base.opackets =
1230 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1232 hw->eth_stats_base.obytes =
1233 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1235 /* reading general device stats */
1236 hw->eth_stats_base.ierrors =
1237 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1239 hw->eth_stats_base.oerrors =
1240 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1242 /* RX ring mbuf allocation failures */
1243 dev->data->rx_mbuf_alloc_failed = 0;
1245 hw->eth_stats_base.imissed =
1246 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1250 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1252 struct nfp_net_hw *hw;
1254 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1256 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1257 dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1258 dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1259 dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1260 dev_info->max_rx_pktlen = hw->max_mtu;
1261 /* Next should change when PF support is implemented */
1262 dev_info->max_mac_addrs = 1;
1264 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
1265 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1267 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
1268 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
1269 DEV_RX_OFFLOAD_UDP_CKSUM |
1270 DEV_RX_OFFLOAD_TCP_CKSUM;
1272 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1274 if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
1275 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
1277 if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
1278 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
1279 DEV_TX_OFFLOAD_UDP_CKSUM |
1280 DEV_TX_OFFLOAD_TCP_CKSUM;
1282 if (hw->cap & NFP_NET_CFG_CTRL_LSO)
1283 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
1285 if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
1286 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
1288 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1290 .pthresh = DEFAULT_RX_PTHRESH,
1291 .hthresh = DEFAULT_RX_HTHRESH,
1292 .wthresh = DEFAULT_RX_WTHRESH,
1294 .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1298 dev_info->default_txconf = (struct rte_eth_txconf) {
1300 .pthresh = DEFAULT_TX_PTHRESH,
1301 .hthresh = DEFAULT_TX_HTHRESH,
1302 .wthresh = DEFAULT_TX_WTHRESH,
1304 .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1305 .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1308 dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
1309 ETH_RSS_NONFRAG_IPV4_UDP |
1310 ETH_RSS_NONFRAG_IPV6_TCP |
1311 ETH_RSS_NONFRAG_IPV6_UDP;
1313 dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1314 dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1316 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1317 ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
1318 ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
1321 static const uint32_t *
1322 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
1324 static const uint32_t ptypes[] = {
1325 /* refers to nfp_net_set_hash() */
1326 RTE_PTYPE_INNER_L3_IPV4,
1327 RTE_PTYPE_INNER_L3_IPV6,
1328 RTE_PTYPE_INNER_L3_IPV6_EXT,
1329 RTE_PTYPE_INNER_L4_MASK,
1333 if (dev->rx_pkt_burst == nfp_net_recv_pkts)
1339 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
1341 struct nfp_net_rxq *rxq;
1342 struct nfp_net_rx_desc *rxds;
1346 rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
1353 * Other PMDs are just checking the DD bit in intervals of 4
1354 * descriptors and counting all four if the first has the DD
1355 * bit on. Of course, this is not accurate but can be good for
1356 * performance. Ideally, though, that should be done in descriptor chunks
1357 * belonging to the same cache line; a sketch of that idea follows this function.
1360 while (count < rxq->rx_count) {
1361 rxds = &rxq->rxds[idx];
1362 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
1369 if ((idx) == rxq->rx_count)
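/*
 * Sketch only (not current driver behaviour): the cache-line-sized DD check
 * mentioned above could look roughly like this, counting four descriptors at
 * a time whenever the first descriptor of the group has its DD bit set:
 *
 *	while (count < rxq->rx_count) {
 *		rxds = &rxq->rxds[idx];
 *		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
 *			break;
 *		count += 4;
 *		idx = (idx + 4) % rxq->rx_count;
 *	}
 */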
1377 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1379 struct rte_pci_device *pci_dev;
1380 struct nfp_net_hw *hw;
1383 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1384 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1386 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1389 /* Make sure all updates are written before un-masking */
1391 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
1392 NFP_NET_CFG_ICR_UNMASKED);
1397 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1399 struct rte_pci_device *pci_dev;
1400 struct nfp_net_hw *hw;
1403 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1404 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1406 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1409 /* Make sure all updates are written before un-masking */
1411 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
1416 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1418 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1419 struct rte_eth_link link;
1421 rte_eth_linkstatus_get(dev, &link);
1422 if (link.link_status)
1423 RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
1424 dev->data->port_id, link.link_speed,
1425 link.link_duplex == ETH_LINK_FULL_DUPLEX
1426 ? "full-duplex" : "half-duplex");
1428 RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
1429 dev->data->port_id);
1431 RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
1432 pci_dev->addr.domain, pci_dev->addr.bus,
1433 pci_dev->addr.devid, pci_dev->addr.function);
1436 /* Interrupt configuration and handling */
1439 * nfp_net_irq_unmask - Unmask an interrupt
1441 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1442 * clear the ICR for the entry.
1445 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1447 struct nfp_net_hw *hw;
1448 struct rte_pci_device *pci_dev;
1450 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1451 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1453 if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
1454 /* If MSI-X auto-masking is used, clear the entry */
1456 rte_intr_enable(&pci_dev->intr_handle);
1458 /* Make sure all updates are written before un-masking */
1460 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1461 NFP_NET_CFG_ICR_UNMASKED);
1466 nfp_net_dev_interrupt_handler(void *param)
1469 struct rte_eth_link link;
1470 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1472 PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n");
1474 rte_eth_linkstatus_get(dev, &link);
1476 nfp_net_link_update(dev, 0);
1479 if (!link.link_status) {
1480 /* handle it 1 sec later, wait for it to become stable */
1481 timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1482 /* link likely to go down */
1484 /* handle it 4 sec later, wait for it to become stable */
1485 timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1488 if (rte_eal_alarm_set(timeout * 1000,
1489 nfp_net_dev_interrupt_delayed_handler,
1491 RTE_LOG(ERR, PMD, "Error setting alarm");
1493 nfp_net_irq_unmask(dev);
1498 * Interrupt handler registered as an alarm callback for delayed handling of a
1499 * specific interrupt, waiting for the NIC state to become stable. As the NFP
1500 * interrupt state is not stable right after the link goes down, it needs
1501 * to wait 4 seconds to get a stable status.
1503 * @param handle Pointer to interrupt handle.
1504 * @param param The address of parameter (struct rte_eth_dev *)
1509 nfp_net_dev_interrupt_delayed_handler(void *param)
1511 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1513 nfp_net_link_update(dev, 0);
1514 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1516 nfp_net_dev_link_status_print(dev);
1519 nfp_net_irq_unmask(dev);
1523 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1525 struct nfp_net_hw *hw;
1527 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1529 /* check that mtu is within the allowed range */
1530 if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
1533 /* mtu setting is forbidden if port is started */
1534 if (dev->data->dev_started) {
1535 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1536 dev->data->port_id);
1540 /* switch to jumbo mode if needed */
1541 if ((uint32_t)mtu > ETHER_MAX_LEN)
1542 dev->data->dev_conf.rxmode.jumbo_frame = 1;
1544 dev->data->dev_conf.rxmode.jumbo_frame = 0;
1546 /* update max frame size */
1547 dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
1549 /* writing to configuration space */
1550 nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
1558 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
1559 uint16_t queue_idx, uint16_t nb_desc,
1560 unsigned int socket_id,
1561 const struct rte_eth_rxconf *rx_conf,
1562 struct rte_mempool *mp)
1564 const struct rte_memzone *tz;
1565 struct nfp_net_rxq *rxq;
1566 struct nfp_net_hw *hw;
1567 struct rte_eth_conf *dev_conf;
1568 struct rte_eth_rxmode *rxmode;
1570 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1572 PMD_INIT_FUNC_TRACE();
1574 /* Validating number of descriptors */
1575 if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
1576 (nb_desc > NFP_NET_MAX_RX_DESC) ||
1577 (nb_desc < NFP_NET_MIN_RX_DESC)) {
1578 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
1582 dev_conf = &dev->data->dev_conf;
1583 rxmode = &dev_conf->rxmode;
1585 if (rx_conf->offloads != rxmode->offloads) {
1586 RTE_LOG(ERR, PMD, "queue %u rx offloads not as port offloads\n",
1588 RTE_LOG(ERR, PMD, "\tport: %" PRIx64 "\n", rxmode->offloads);
1589 RTE_LOG(ERR, PMD, "\tqueue: %" PRIx64 "\n", rx_conf->offloads);
1594 * Free memory prior to re-allocation if needed. This is the case after
1595 * calling nfp_net_stop
1597 if (dev->data->rx_queues[queue_idx]) {
1598 nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
1599 dev->data->rx_queues[queue_idx] = NULL;
1602 /* Allocating rx queue data structure */
1603 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
1604 RTE_CACHE_LINE_SIZE, socket_id);
1608 /* Hw queues mapping based on firmware configuration */
1609 rxq->qidx = queue_idx;
1610 rxq->fl_qcidx = queue_idx * hw->stride_rx;
1611 rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
1612 rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
1613 rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
1616 * Tracking mbuf size for detecting a potential mbuf overflow due to the RX offset
1620 rxq->mbuf_size = rxq->mem_pool->elt_size;
1621 rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
1622 hw->flbufsz = rxq->mbuf_size;
1624 rxq->rx_count = nb_desc;
1625 rxq->port_id = dev->data->port_id;
1626 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1627 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
1629 rxq->drop_en = rx_conf->rx_drop_en;
1632 * Allocate RX ring hardware descriptors. A memzone large enough to
1633 * handle the maximum ring size is allocated in order to allow for
1634 * resizing in later calls to the queue setup function.
1636 tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1637 sizeof(struct nfp_net_rx_desc) *
1638 NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
1642 RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
1643 nfp_net_rx_queue_release(rxq);
1647 /* Saving physical and virtual addresses for the RX ring */
1648 rxq->dma = (uint64_t)tz->iova;
1649 rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
1651 /* mbuf pointers array for referencing mbufs linked to RX descriptors */
1652 rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
1653 sizeof(*rxq->rxbufs) * nb_desc,
1654 RTE_CACHE_LINE_SIZE, socket_id);
1655 if (rxq->rxbufs == NULL) {
1656 nfp_net_rx_queue_release(rxq);
1660 PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
1661 rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
1663 nfp_net_reset_rx_queue(rxq);
1665 dev->data->rx_queues[queue_idx] = rxq;
1669 * Telling the HW about the physical address of the RX ring and number
1670 * of descriptors in log2 format
1672 nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
1673 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1679 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
1681 struct nfp_net_rx_buff *rxe = rxq->rxbufs;
1685 PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
1688 for (i = 0; i < rxq->rx_count; i++) {
1689 struct nfp_net_rx_desc *rxd;
1690 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
1693 RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
1694 (unsigned)rxq->qidx);
1698 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
1700 rxd = &rxq->rxds[i];
1702 rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
1703 rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
1705 PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
1708 /* Make sure all writes are flushed before telling the hardware */
1711 /* Not advertising the whole ring as the firmware gets confused if so */
1712 PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n",
1715 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
1721 nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1722 uint16_t nb_desc, unsigned int socket_id,
1723 const struct rte_eth_txconf *tx_conf)
1725 const struct rte_memzone *tz;
1726 struct nfp_net_txq *txq;
1727 uint16_t tx_free_thresh;
1728 struct nfp_net_hw *hw;
1729 struct rte_eth_conf *dev_conf;
1730 struct rte_eth_txmode *txmode;
1732 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1734 PMD_INIT_FUNC_TRACE();
1736 /* Validating number of descriptors */
1737 if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
1738 (nb_desc > NFP_NET_MAX_TX_DESC) ||
1739 (nb_desc < NFP_NET_MIN_TX_DESC)) {
1740 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
1744 dev_conf = &dev->data->dev_conf;
1745 txmode = &dev_conf->txmode;
1747 if (tx_conf->offloads != txmode->offloads) {
1748 RTE_LOG(ERR, PMD, "queue %u tx offloads not as port offloads",
1753 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1754 tx_conf->tx_free_thresh :
1755 DEFAULT_TX_FREE_THRESH);
1757 if (tx_free_thresh > (nb_desc)) {
1759 "tx_free_thresh must be less than the number of TX "
1760 "descriptors. (tx_free_thresh=%u port=%d "
1761 "queue=%d)\n", (unsigned int)tx_free_thresh,
1762 dev->data->port_id, (int)queue_idx);
1767 * Free memory prior to re-allocation if needed. This is the case after
1768 * calling nfp_net_stop
1770 if (dev->data->tx_queues[queue_idx]) {
1771 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n",
1773 nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
1774 dev->data->tx_queues[queue_idx] = NULL;
1777 /* Allocating tx queue data structure */
1778 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
1779 RTE_CACHE_LINE_SIZE, socket_id);
1781 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
1786 * Allocate TX ring hardware descriptors. A memzone large enough to
1787 * handle the maximum ring size is allocated in order to allow for
1788 * resizing in later calls to the queue setup function.
1790 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1791 sizeof(struct nfp_net_tx_desc) *
1792 NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
1795 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
1796 nfp_net_tx_queue_release(txq);
1800 txq->tx_count = nb_desc;
1801 txq->tx_free_thresh = tx_free_thresh;
1802 txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
1803 txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
1804 txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
1806 /* queue mapping based on firmware configuration */
1807 txq->qidx = queue_idx;
1808 txq->tx_qcidx = queue_idx * hw->stride_tx;
1809 txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
1811 txq->port_id = dev->data->port_id;
1813 /* Saving physical and virtual addresses for the TX ring */
1814 txq->dma = (uint64_t)tz->iova;
1815 txq->txds = (struct nfp_net_tx_desc *)tz->addr;
1817 /* mbuf pointers array for referencing mbufs linked to TX descriptors */
1818 txq->txbufs = rte_zmalloc_socket("txq->txbufs",
1819 sizeof(*txq->txbufs) * nb_desc,
1820 RTE_CACHE_LINE_SIZE, socket_id);
1821 if (txq->txbufs == NULL) {
1822 nfp_net_tx_queue_release(txq);
1825 PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
1826 txq->txbufs, txq->txds, (unsigned long int)txq->dma);
1828 nfp_net_reset_tx_queue(txq);
1830 dev->data->tx_queues[queue_idx] = txq;
1834 * Telling the HW about the physical address of the TX ring and number
1835 * of descriptors in log2 format
1837 nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
1838 nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1843 /* nfp_net_tx_tso - Set TX descriptor for TSO */
1845 nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1846 struct rte_mbuf *mb)
1849 struct nfp_net_hw *hw = txq->hw;
1851 if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
1854 ol_flags = mb->ol_flags;
1856 if (!(ol_flags & PKT_TX_TCP_SEG))
1859 txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
1860 txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
1861 txd->flags = PCIE_DESC_TX_LSO;
1870 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
1872 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1873 struct rte_mbuf *mb)
1876 struct nfp_net_hw *hw = txq->hw;
1878 if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
1881 ol_flags = mb->ol_flags;
1883 /* IPv6 does not need checksum */
1884 if (ol_flags & PKT_TX_IP_CKSUM)
1885 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
1887 switch (ol_flags & PKT_TX_L4_MASK) {
1888 case PKT_TX_UDP_CKSUM:
1889 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
1891 case PKT_TX_TCP_CKSUM:
1892 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
1896 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
1897 txd->flags |= PCIE_DESC_TX_CSUM;
1900 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
1902 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1903 struct rte_mbuf *mb)
1905 struct nfp_net_hw *hw = rxq->hw;
1907 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
1910 /* If IPv4 and IP checksum error, fail */
1911 if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
1912 !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
1913 mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1915 /* If neither UDP nor TCP, return */
1916 if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1917 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
1920 if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1921 !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
1922 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1924 if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
1925 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
1926 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1929 #define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
1930 #define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
1932 #define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
1935 * nfp_net_set_hash - Set mbuf hash data
1937 * The RSS hash and hash-type are pre-pended to the packet data.
1938 * Extract and decode it and set the mbuf fields.
1941 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1942 struct rte_mbuf *mbuf)
1944 struct nfp_net_hw *hw = rxq->hw;
1945 uint8_t *meta_offset;
1948 uint32_t hash_type = 0;
1950 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1953 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) {
1954 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1957 hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
1958 hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
1960 } else if (NFP_DESC_META_LEN(rxd)) {
1963 * <---- 32 bit ----->
1968 * ====================
1971 * Field type word contains up to 8 4bit field types
1972 * A 4bit field type refers to a data field word
1973 * A data field word can have several 4bit field types
1975 meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
1976 meta_offset -= NFP_DESC_META_LEN(rxd);
1977 meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1979 /* NFP PMD just supports metadata for hashing */
1980 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1981 case NFP_NET_META_HASH:
1982 /* next field type is about the hash type */
1983 meta_info >>= NFP_NET_META_FIELD_SIZE;
1984 /* hash value is in the data field */
1985 hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1986 hash_type = meta_info & NFP_NET_META_FIELD_MASK;
1989 /* Unsupported metadata can be a performance issue */
1996 mbuf->hash.rss = hash;
1997 mbuf->ol_flags |= PKT_RX_RSS_HASH;
1999 switch (hash_type) {
2000 case NFP_NET_RSS_IPV4:
2001 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
2003 case NFP_NET_RSS_IPV6:
2004 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
2006 case NFP_NET_RSS_IPV6_EX:
2007 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
2010 mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
2015 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
2017 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
2025 * There are some decisions to take:
2026 * 1) How to check DD RX descriptors bit
2027 * 2) How and when to allocate new mbufs
2029 * Current implementation checks just one single DD bit each loop. As each
2030 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
2031 * a single cache line instead. Tests with this change have not shown any
2032 * performance improvement but it requires further investigation. For example,
2033 * depending on which descriptor is next, the number of descriptors could be
2034 * less than 8 for just checking those in the same cache line. This implies
2035 * extra work which could be counterproductive by itself. Indeed, last firmware
2036 * changes are just doing this: writing several descriptors with the DD bit
2037 * for saving PCIe bandwidth and DMA operations from the NFP.
2039 * Mbuf allocation is done when a new packet is received. Then the descriptor
2040 * is automatically linked with the new mbuf and the old one is given to the
2041 * user. The main drawback with this design is mbuf allocation is heavier than
2042 * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the
2043 * cache point of view it does not seem that allocating the mbuf early, as we
2044 * are doing now, has any benefit at all. Again, tests with this change have not
2045 * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing
2046 * so looking at the implications of this type of allocation should be studied
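/*
 * Hypothetical sketch of the bulk-allocation alternative discussed above
 * (not what the driver does today). REFILL_BURST is an illustrative constant,
 * not a real driver define; rte_mempool_get_bulk() is all-or-nothing, so a
 * failed call leaves the free list untouched:
 *
 *	struct rte_mbuf *bulk[REFILL_BURST];
 *
 *	if (rte_mempool_get_bulk(rxq->mem_pool, (void **)bulk,
 *				 REFILL_BURST) != 0)
 *		return avail;		-- nothing allocated, nothing to refill
 *	... attach bulk[0..REFILL_BURST-1] to the next free descriptors ...
 */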
2051 nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2053 struct nfp_net_rxq *rxq;
2054 struct nfp_net_rx_desc *rxds;
2055 struct nfp_net_rx_buff *rxb;
2056 struct nfp_net_hw *hw;
2057 struct rte_mbuf *mb;
2058 struct rte_mbuf *new_mb;
2064 if (unlikely(rxq == NULL)) {
2066 * DPDK just checks the queue is lower than max queues
2067 * enabled. But the queue needs to be configured
2069 RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
2077 while (avail < nb_pkts) {
2078 rxb = &rxq->rxbufs[rxq->rd_p];
2079 if (unlikely(rxb == NULL)) {
2080 RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
2084 rxds = &rxq->rxds[rxq->rd_p];
2085 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
2089 * Memory barrier to ensure that we won't do other
2090 * reads before the DD bit.
2095 * We got a packet. Let's alloc a new mbuf for refilling the
2096 * free descriptor ring as soon as possible
2098 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
2099 if (unlikely(new_mb == NULL)) {
2100 RTE_LOG_DP(DEBUG, PMD,
2101 "RX mbuf alloc failed port_id=%u queue_id=%u\n",
2102 rxq->port_id, (unsigned int)rxq->qidx);
2103 nfp_net_mbuf_alloc_failed(rxq);
2110 * Grab the mbuf and refill the descriptor with the
2111 * previously allocated mbuf
2116 PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n",
2117 rxds->rxd.data_len, rxq->mbuf_size);
2119 /* Size of this segment */
2120 mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2121 /* Size of the whole packet. We just support 1 segment */
2122 mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2124 if (unlikely((mb->data_len + hw->rx_offset) >
2127 * This should not happen and the user has the
2128 * responsibility of avoiding it. But we have
2129 * to give some info about the error
2131 RTE_LOG_DP(ERR, PMD,
2132 "mbuf overflow likely due to the RX offset.\n"
2133 "\t\tYour mbuf size should have extra space for"
2134 " RX offset=%u bytes.\n"
2135 "\t\tCurrently you just have %u bytes available"
2136 " but the received packet is %u bytes long",
2138 rxq->mbuf_size - hw->rx_offset,
2143 /* Filling the received mbuf with packet info */
2145 mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
2147 mb->data_off = RTE_PKTMBUF_HEADROOM +
2148 NFP_DESC_META_LEN(rxds);
2150 /* No scatter mode supported */
2154 mb->port = rxq->port_id;
2156 /* Checking the RSS flag */
2157 nfp_net_set_hash(rxq, rxds, mb);
2159 /* Checking the checksum flag */
2160 nfp_net_rx_cksum(rxq, rxds, mb);
2162 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
2163 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
2164 mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
2165 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2168 /* Adding the mbuf to the mbuf array passed by the app */
2169 rx_pkts[avail++] = mb;
2171 /* Now resetting and updating the descriptor */
2174 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
2176 rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
2177 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
2180 if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
2187 PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
2188 rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
2190 nb_hold += rxq->nb_rx_hold;
2193 * FL descriptors need to be written before incrementing the
2194 * FL queue WR pointer
2197 if (nb_hold > rxq->rx_free_thresh) {
2198 PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
2199 rxq->port_id, (unsigned int)rxq->qidx,
2200 (unsigned)nb_hold, (unsigned)avail);
2201 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
2204 rxq->nb_rx_hold = nb_hold;
2210 * nfp_net_tx_free_bufs - Check for descriptors with a complete
2211 * status
2212 * @txq: TX queue to work with
2213 * Returns number of descriptors freed
2216 nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
2221 PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
2222 " status\n", txq->qidx);
2224 /* Work out how many packets have been sent */
2225 qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
2227 if (qcp_rd_p == txq->rd_p) {
2228 PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
2229 "packets (%u, %u)\n", txq->qidx,
2230 qcp_rd_p, txq->rd_p);
2234 if (qcp_rd_p > txq->rd_p)
2235 todo = qcp_rd_p - txq->rd_p;
2237 todo = qcp_rd_p + txq->tx_count - txq->rd_p;
2239 PMD_TX_LOG(DEBUG, "qcp_rd_p: %u, txq->rd_p: %u, todo: %u\n",
2240 qcp_rd_p, txq->rd_p, todo);
2246 if (unlikely(txq->rd_p >= txq->tx_count))
2247 txq->rd_p -= txq->tx_count;
2252 /* Always leave some free descriptors to avoid wrap-around confusion */
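/*
 * Worked example with illustrative values: with tx_count = 256, wr_p = 20
 * and rd_p = 10, nfp_free_tx_desc() returns 256 - (20 - 10) - 8 = 238,
 * keeping 8 descriptors in reserve.
 */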
2254 uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
2256 if (txq->wr_p >= txq->rd_p)
2257 return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
2259 return txq->rd_p - txq->wr_p - 8;
2263 * nfp_net_txq_full - Check if the number of free TX descriptors
2264 * is below tx_free_threshold
2266 * @txq: TX queue to check
2268 * This function uses the host copy of the read/write pointers
2271 uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
2273 return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
2277 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2279 struct nfp_net_txq *txq;
2280 struct nfp_net_hw *hw;
2281 struct nfp_net_tx_desc *txds, txd;
2282 struct rte_mbuf *pkt;
2284 int pkt_size, dma_size;
2285 uint16_t free_descs, issued_descs;
2286 struct rte_mbuf **lmbuf;
2291 txds = &txq->txds[txq->wr_p];
2293 PMD_TX_LOG(DEBUG, "working on queue %u at pos %d with %u packets\n",
2294 txq->qidx, txq->wr_p, nb_pkts);
2296 if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
2297 nfp_net_tx_free_bufs(txq);
2299 free_descs = (uint16_t)nfp_free_tx_desc(txq);
2300 if (unlikely(free_descs == 0))
2307 PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n",
2308 txq->qidx, nb_pkts);
2309 /* Sending packets */
2310 while ((i < nb_pkts) && free_descs) {
2311 /* Grabbing the mbuf linked to the current descriptor */
2312 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2313 /* Warming the cache for releasing the mbuf later on */
2314 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
2316 pkt = *(tx_pkts + i);
2318 if (unlikely((pkt->nb_segs > 1) &&
2319 !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
2320 PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
2321 rte_panic("Multisegment packet unsupported\n");
2324 /* Checking if we have enough descriptors */
2325 if (unlikely(pkt->nb_segs > free_descs))
2329 * Checksum and VLAN flags are set only in the first descriptor of a
2330 * multisegment packet, but TSO info needs to be in all of them.
2332 txd.data_len = pkt->pkt_len;
2333 nfp_net_tx_tso(txq, &txd, pkt);
2334 nfp_net_tx_cksum(txq, &txd, pkt);
2336 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
2337 (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
2338 txd.flags |= PCIE_DESC_TX_VLAN;
2339 txd.vlan = pkt->vlan_tci;
2343 * mbuf data_len is the amount of data in one segment and pkt_len is
2344 * the size of the whole packet. For a single-segment packet,
2345 * data_len equals pkt_len
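* (e.g. a 3000-byte packet split into two segments carries pkt_len = 3000
* on the first mbuf and data_len = 1500 in each segment; the figures are
* only illustrative)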
2347 pkt_size = pkt->pkt_len;
2350 /* Copying TSO, VLAN and cksum info */
2353 /* Releasing the mbuf previously used by this descriptor */
2355 rte_pktmbuf_free_seg(*lmbuf);
2358 * Linking the mbuf with the descriptor so it can be released
2359 * the next time the descriptor is used
2363 dma_size = pkt->data_len;
2364 dma_addr = rte_mbuf_data_iova(pkt);
2365 PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
2366 "%" PRIx64 "\n", dma_addr);
2368 /* Filling descriptors fields */
2369 txds->dma_len = dma_size;
2370 txds->data_len = txd.data_len;
2371 txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
2372 txds->dma_addr_lo = (dma_addr & 0xffffffff);
2373 ASSERT(free_descs > 0);
2377 if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping? */
2380 pkt_size -= dma_size;
2383 txds->offset_eop |= PCIE_DESC_TX_EOP;
2385 txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
2388 /* Referencing next free TX descriptor */
2389 txds = &txq->txds[txq->wr_p];
2390 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2397 /* Increment write pointers. Force memory write before we let HW know */
2399 nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
2405 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2407 uint32_t new_ctrl, update;
2408 struct nfp_net_hw *hw;
2411 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2414 if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
2415 (mask & ETH_VLAN_EXTEND_OFFLOAD))
2416 RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
2417 " ETH_VLAN_EXTEND_OFFLOAD");
2419 /* Enable vlan strip if it is not configured yet */
2420 if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
2421 !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2422 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
2424 /* Disable vlan strip only if it is currently enabled */
2425 if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
2426 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2427 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
2432 update = NFP_NET_CFG_UPDATE_GEN;
2434 ret = nfp_net_reconfig(hw, new_ctrl, update);
2436 hw->ctrl = new_ctrl;
2442 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
2443 struct rte_eth_rss_reta_entry64 *reta_conf,
2446 uint32_t reta, mask;
2449 struct nfp_net_hw *hw =
2450 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2452 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2453 RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
2454 "(%d) doesn't match what the hardware supports "
2455 "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2460 * Update Redirection Table. There are 128 8-bit entries which can be
2461 * managed as 32 32-bit entries.
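* Worked example with illustrative values: for i = 68, idx = 1 and
* shift = 4 (RTE_RETA_GROUP_SIZE is 64), so entries 68..71 come from
* reta_conf[1].reta[4..7] and the 4-bit mask from bits 4..7 of
* reta_conf[1].mask.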
2463 for (i = 0; i < reta_size; i += 4) {
2464 /* Handling 4 RSS entries per loop */
2465 idx = i / RTE_RETA_GROUP_SIZE;
2466 shift = i % RTE_RETA_GROUP_SIZE;
2467 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2473 /* If all 4 entries were set, there is no need to read the RETA register */
2475 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
2477 for (j = 0; j < 4; j++) {
2478 if (!(mask & (0x1 << j)))
2481 /* Clearing the entry bits */
2482 reta &= ~(0xFF << (8 * j));
2483 reta |= reta_conf[idx].reta[shift + j] << (8 * j);
2485 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
2491 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
2493 nfp_net_reta_update(struct rte_eth_dev *dev,
2494 struct rte_eth_rss_reta_entry64 *reta_conf,
2497 struct nfp_net_hw *hw =
2498 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2502 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2505 ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
2509 update = NFP_NET_CFG_UPDATE_RSS;
2511 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2517 /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
2519 nfp_net_reta_query(struct rte_eth_dev *dev,
2520 struct rte_eth_rss_reta_entry64 *reta_conf,
2526 struct nfp_net_hw *hw;
2528 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2530 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2533 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2534 RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
2535 "(%d) doesn't match what the hardware supports "
2536 "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2541 * Reading Redirection Table. There are 128 8-bit entries which can be
2542 * managed as 32 32-bit entries.
2544 for (i = 0; i < reta_size; i += 4) {
2545 /* Handling 4 RSS entries per loop */
2546 idx = i / RTE_RETA_GROUP_SIZE;
2547 shift = i % RTE_RETA_GROUP_SIZE;
2548 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2553 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
2555 for (j = 0; j < 4; j++) {
2556 if (!(mask & (0x1 << j)))
2558 reta_conf[idx].reta[shift + j] =
2559 (uint8_t)((reta >> (8 * j)) & 0xFF);
2566 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
2567 struct rte_eth_rss_conf *rss_conf)
2569 struct nfp_net_hw *hw;
2571 uint32_t cfg_rss_ctrl = 0;
2575 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2577 /* Writing the key byte by byte */
2578 for (i = 0; i < rss_conf->rss_key_len; i++) {
2579 memcpy(&key, &rss_conf->rss_key[i], 1);
2580 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
2583 rss_hf = rss_conf->rss_hf;
2585 if (rss_hf & ETH_RSS_IPV4)
2586 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
2587 NFP_NET_CFG_RSS_IPV4_TCP |
2588 NFP_NET_CFG_RSS_IPV4_UDP;
2590 if (rss_hf & ETH_RSS_IPV6)
2591 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
2592 NFP_NET_CFG_RSS_IPV6_TCP |
2593 NFP_NET_CFG_RSS_IPV6_UDP;
2595 cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
2596 cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
2598 /* configuring where to apply the RSS hash */
2599 nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
2601 /* Writing the key size */
2602 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
2608 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
2609 struct rte_eth_rss_conf *rss_conf)
2613 struct nfp_net_hw *hw;
2615 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2617 rss_hf = rss_conf->rss_hf;
2619 /* Checking if RSS is enabled */
2620 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
2621 if (rss_hf != 0) { /* Enable RSS? */
2622 RTE_LOG(ERR, PMD, "RSS unsupported\n");
2625 return 0; /* Nothing to do */
2628 if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
2629 RTE_LOG(ERR, PMD, "hash key too long\n");
2633 nfp_net_rss_hash_write(dev, rss_conf);
2635 update = NFP_NET_CFG_UPDATE_RSS;
2637 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2644 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2645 struct rte_eth_rss_conf *rss_conf)
2648 uint32_t cfg_rss_ctrl;
2651 struct nfp_net_hw *hw;
2653 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2655 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2658 rss_hf = rss_conf->rss_hf;
2659 cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2661 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
2662 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
2664 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
2665 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2667 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
2668 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2670 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
2671 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2673 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
2674 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2676 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
2677 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP;
2679 /* Reading the key size */
2680 rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2682 /* Reading the key byte by byte */
2683 for (i = 0; i < rss_conf->rss_key_len; i++) {
2684 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2685 memcpy(&rss_conf->rss_key[i], &key, 1);
2692 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2694 struct rte_eth_conf *dev_conf;
2695 struct rte_eth_rss_conf rss_conf;
2696 struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2697 uint16_t rx_queues = dev->data->nb_rx_queues;
2701 RTE_LOG(INFO, PMD, "setting default RSS conf for %u queues\n",
2704 nfp_reta_conf[0].mask = ~0x0;
2705 nfp_reta_conf[1].mask = ~0x0;
2708 for (i = 0; i < 0x40; i += 8) {
2709 for (j = i; j < (i + 8); j++) {
2710 nfp_reta_conf[0].reta[j] = queue;
2711 nfp_reta_conf[1].reta[j] = queue++;
2715 ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
2719 dev_conf = &dev->data->dev_conf;
2721 RTE_LOG(INFO, PMD, "wrong RSS configuration");
2724 rss_conf = dev_conf->rx_adv_conf.rss_conf;
2726 ret = nfp_net_rss_hash_write(dev, &rss_conf);
2732 /* Initialise and register driver with DPDK Application */
2733 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
2734 .dev_configure = nfp_net_configure,
2735 .dev_start = nfp_net_start,
2736 .dev_stop = nfp_net_stop,
2737 .dev_close = nfp_net_close,
2738 .promiscuous_enable = nfp_net_promisc_enable,
2739 .promiscuous_disable = nfp_net_promisc_disable,
2740 .link_update = nfp_net_link_update,
2741 .stats_get = nfp_net_stats_get,
2742 .stats_reset = nfp_net_stats_reset,
2743 .dev_infos_get = nfp_net_infos_get,
2744 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
2745 .mtu_set = nfp_net_dev_mtu_set,
2746 .vlan_offload_set = nfp_net_vlan_offload_set,
2747 .reta_update = nfp_net_reta_update,
2748 .reta_query = nfp_net_reta_query,
2749 .rss_hash_update = nfp_net_rss_hash_update,
2750 .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
2751 .rx_queue_setup = nfp_net_rx_queue_setup,
2752 .rx_queue_release = nfp_net_rx_queue_release,
2753 .rx_queue_count = nfp_net_rx_queue_count,
2754 .tx_queue_setup = nfp_net_tx_queue_setup,
2755 .tx_queue_release = nfp_net_tx_queue_release,
2756 .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
2757 .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
2761 * Every eth_dev created gets its own private data, but before nfp_net_init
2762 * that private data references the private data of all the PF ports. This is
2763 * because the vNIC BARs are mapped based on the first port, so all ports need
2764 * the port 0 private data. Inside nfp_net_init the private data pointer is
2765 * changed to the right address for each port once the BARs have been mapped.
2767 * This function helps to find out which port, and therefore which offset
2768 * inside the private data array, to use.
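* For example, an eth_dev named "0000:04:00.0_port3" (the PCI address is
* only illustrative) yields port number 3.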
2771 get_pf_port_number(char *name)
2773 char *pf_str = name;
2776 while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
2781 * This should not happen at all and would mean a major
2782 * implementation fault.
2784 rte_panic("nfp_net: problem with pf device name\n");
2786 /* Expecting _portX with X within [0,7] */
2789 return (int)strtol(pf_str, NULL, 10);
2793 nfp_net_init(struct rte_eth_dev *eth_dev)
2795 struct rte_pci_device *pci_dev;
2796 struct nfp_net_hw *hw, *hwport0;
2798 uint64_t tx_bar_off = 0, rx_bar_off = 0;
2804 PMD_INIT_FUNC_TRACE();
2806 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2808 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
2809 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
2810 port = get_pf_port_number(eth_dev->data->name);
2811 if (port < 0 || port > 7) {
2812 RTE_LOG(ERR, PMD, "Port value is wrong\n");
2816 PMD_INIT_LOG(DEBUG, "Working with PF port value %d\n", port);
2818 /* This points to port 0 private data */
2819 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2821 /* This points to the specific port private data */
2822 hw = &hwport0[port];
2824 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2828 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
2829 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
2830 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
2832 /* For secondary processes, the primary has done all the work */
2833 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2836 rte_eth_copy_pci_info(eth_dev, pci_dev);
2838 hw->device_id = pci_dev->id.device_id;
2839 hw->vendor_id = pci_dev->id.vendor_id;
2840 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2841 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2843 PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
2844 pci_dev->id.vendor_id, pci_dev->id.device_id,
2845 pci_dev->addr.domain, pci_dev->addr.bus,
2846 pci_dev->addr.devid, pci_dev->addr.function);
2848 hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
2849 if (hw->ctrl_bar == NULL) {
2851 "hw->ctrl_bar is NULL. BAR0 not configured\n");
2855 if (hw->is_pf && port == 0) {
2856 hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0",
2857 hw->total_ports * 32768,
2859 if (!hw->ctrl_bar) {
2860 printf("nfp_rtsym_map failed for _pf0_net_bar0\n");
2864 PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
2868 if (!hwport0->ctrl_bar)
2871 /* address based on port0 offset */
2872 hw->ctrl_bar = hwport0->ctrl_bar +
2873 (port * NFP_PF_CSR_SLICE_SIZE);
2876 PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
2878 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
2879 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
2881 /* Work out where in the BAR the queues start. */
2882 switch (pci_dev->id.device_id) {
2883 case PCI_DEVICE_ID_NFP4000_PF_NIC:
2884 case PCI_DEVICE_ID_NFP6000_PF_NIC:
2885 case PCI_DEVICE_ID_NFP6000_VF_NIC:
2886 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
2887 tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
2888 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
2889 rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
2892 RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n");
2894 goto dev_err_ctrl_map;
2897 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "\n", tx_bar_off);
2898 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "\n", rx_bar_off);
2900 if (hw->is_pf && port == 0) {
2901 /* configure access to tx/rx vNIC BARs */
2902 hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
2904 NFP_QCP_QUEUE_AREA_SZ,
2905 &hw->hwqueues_area);
2907 if (!hwport0->hw_queues) {
2908 printf("nfp_cpp_map_area failed for net.qc\n");
2910 goto dev_err_ctrl_map;
2913 PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p\n",
2914 hwport0->hw_queues);
2918 hw->tx_bar = hwport0->hw_queues + tx_bar_off;
2919 hw->rx_bar = hwport0->hw_queues + rx_bar_off;
2920 eth_dev->data->dev_private = hw;
2922 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2924 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2928 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
2929 hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
2931 nfp_net_cfg_queue_setup(hw);
2933 /* Get some of the read-only fields from the config BAR */
2934 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2935 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
2936 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
2937 hw->mtu = ETHER_MTU;
2939 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
2940 hw->rx_offset = NFP_NET_RX_OFFSET;
2942 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
2944 PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
2945 hw->ver, hw->max_mtu);
2946 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap,
2947 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2948 hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
2949 hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
2950 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
2951 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
2952 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
2953 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
2954 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2955 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
2956 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
2957 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");
2961 hw->stride_rx = stride;
2962 hw->stride_tx = stride;
2964 PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
2965 hw->max_rx_queues, hw->max_tx_queues);
2967 /* Initializing spinlock for reconfigs */
2968 rte_spinlock_init(&hw->reconfig_lock);
2970 /* Allocating memory for mac addr */
2971 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2972 if (eth_dev->data->mac_addrs == NULL) {
2973 PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
2975 goto dev_err_queues_map;
2979 nfp_net_pf_read_mac(hwport0, port);
2980 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2982 nfp_net_vf_read_mac(hw);
2985 if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
2986 PMD_INIT_LOG(INFO, "Using random mac address for port %d\n",
2988 /* Using random mac addresses for VFs */
2989 eth_random_addr(&hw->mac_addr[0]);
2990 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2993 /* Copying mac address to DPDK eth_dev struct */
2994 ether_addr_copy((struct ether_addr *)hw->mac_addr,
2995 &eth_dev->data->mac_addrs[0]);
2997 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
2998 "mac=%02x:%02x:%02x:%02x:%02x:%02x",
2999 eth_dev->data->port_id, pci_dev->id.vendor_id,
3000 pci_dev->id.device_id,
3001 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
3002 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
3004 /* Registering LSC interrupt handler */
3005 rte_intr_callback_register(&pci_dev->intr_handle,
3006 nfp_net_dev_interrupt_handler,
3009 /* Telling the firmware about the LSC interrupt entry */
3010 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
3012 /* Recording current stats counters values */
3013 nfp_net_stats_reset(eth_dev);
3018 nfp_cpp_area_free(hw->hwqueues_area);
3020 nfp_cpp_area_free(hw->ctrl_area);
3026 nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
3027 struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo,
3028 int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv)
3030 struct rte_eth_dev *eth_dev;
3031 struct nfp_net_hw *hw;
3035 port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
3040 sprintf(port_name, "%s_port%d", dev->device.name, port);
3042 sprintf(port_name, "%s", dev->device.name);
3044 eth_dev = rte_eth_dev_allocate(port_name);
3049 *priv = rte_zmalloc(port_name,
3050 sizeof(struct nfp_net_adapter) * ports,
3051 RTE_CACHE_LINE_SIZE);
3053 rte_eth_dev_release_port(eth_dev);
3058 eth_dev->data->dev_private = *priv;
3061 * dev_private pointing to port0 dev_private because we need
3062 * to configure vNIC bars based on port0 at nfp_net_init.
3063 * Then dev_private is adjusted per port.
3065 hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
3067 hw->hwinfo = hwinfo;
3068 hw->sym_tbl = sym_tbl;
3069 hw->pf_port_idx = phys_port;
3072 hw->pf_multiport_enabled = 1;
3074 hw->total_ports = ports;
3076 eth_dev->device = &dev->device;
3077 rte_eth_copy_pci_info(eth_dev, dev);
3079 ret = nfp_net_init(eth_dev);
3082 rte_eth_dev_release_port(eth_dev);
3084 rte_free(port_name);
3089 #define DEFAULT_FW_PATH "/lib/firmware/netronome"
3092 nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
3094 struct nfp_cpp *cpp = nsp->cpp;
3099 struct stat file_stat;
3102 /* Looking for firmware file in order of priority */
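/*
 * Illustrative example of the search order; the serial, PCI address and
 * card model below are made-up values:
 *   1) /lib/firmware/netronome/serial-00-15-4d-00-00-01-10-ff.nffw
 *   2) /lib/firmware/netronome/pci-0000:04:00.0.nffw
 *   3) /lib/firmware/netronome/nic_AMDA0081-0001_4x10.nffw
 */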
3104 /* First try to find a firmware image specific for this device */
3105 sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
3106 cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
3107 cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
3108 cpp->interface & 0xff);
3110 sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
3112 RTE_LOG(DEBUG, PMD, "Trying with fw file: %s\n", fw_name);
3113 fw_f = open(fw_name, O_RDONLY);
3117 /* Then try the PCI name */
3118 sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name);
3120 RTE_LOG(DEBUG, PMD, "Trying with fw file: %s\n", fw_name);
3121 fw_f = open(fw_name, O_RDONLY);
3125 /* Finally try the card type and media */
3126 sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card);
3127 RTE_LOG(DEBUG, PMD, "Trying with fw file: %s\n", fw_name);
3128 fw_f = open(fw_name, O_RDONLY);
3130 RTE_LOG(INFO, PMD, "Firmware file %s not found.", fw_name);
3135 if (fstat(fw_f, &file_stat) < 0) {
3136 RTE_LOG(INFO, PMD, "Firmware file %s size is unknown", fw_name);
3141 fsize = file_stat.st_size;
3142 RTE_LOG(INFO, PMD, "Firmware file found at %s with size: %" PRIu64 "\n",
3143 fw_name, (uint64_t)fsize);
3145 fw_buf = malloc((size_t)fsize);
3147 RTE_LOG(INFO, PMD, "malloc failed for fw buffer");
3151 memset(fw_buf, 0, fsize);
3153 bytes = read(fw_f, fw_buf, fsize);
3154 if (bytes != fsize) {
3155 RTE_LOG(INFO, PMD, "Reading fw to buffer failed.\n"
3156 "Just %" PRIu64 " of %" PRIu64 " bytes read",
3157 (uint64_t)bytes, (uint64_t)fsize);
3163 RTE_LOG(INFO, PMD, "Uploading the firmware ...");
3164 nfp_nsp_load_fw(nsp, fw_buf, bytes);
3165 RTE_LOG(INFO, PMD, "Done");
3174 nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
3175 struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
3177 struct nfp_nsp *nsp;
3178 const char *nfp_fw_model;
3179 char card_desc[100];
3182 nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
3185 RTE_LOG(INFO, PMD, "firmware model found: %s\n", nfp_fw_model);
3187 RTE_LOG(ERR, PMD, "firmware model NOT found\n");
3191 if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
3192 RTE_LOG(ERR, PMD, "NFP ethernet table reports a wrong number of ports: %u\n",
3193 nfp_eth_table->count);
3197 RTE_LOG(INFO, PMD, "NFP ethernet port table reports %u ports\n",
3198 nfp_eth_table->count);
3200 RTE_LOG(INFO, PMD, "Port speed: %u\n", nfp_eth_table->ports[0].speed);
3202 sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model,
3203 nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000);
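/*
 * e.g. an "AMDA0081-0001" assembly with 4 ports at 10000 Mb/s (illustrative
 * values) yields the firmware name "nic_AMDA0081-0001_4x10.nffw".
 */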
3205 nsp = nfp_nsp_open(cpp);
3207 RTE_LOG(ERR, PMD, "NFP error when obtaining NSP handle\n");
3211 nfp_nsp_device_soft_reset(nsp);
3212 err = nfp_fw_upload(dev, nsp, card_desc);
3218 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3219 struct rte_pci_device *dev)
3221 struct nfp_cpp *cpp;
3222 struct nfp_hwinfo *hwinfo;
3223 struct nfp_rtsym_table *sym_tbl;
3224 struct nfp_eth_table *nfp_eth_table = NULL;
3234 cpp = nfp_cpp_from_device_name(dev->device.name);
3236 RTE_LOG(ERR, PMD, "A CPP handle cannot be obtained");
3241 hwinfo = nfp_hwinfo_read(cpp);
3243 RTE_LOG(ERR, PMD, "Error reading hwinfo table");
3247 nfp_eth_table = nfp_eth_read_ports(cpp);
3248 if (!nfp_eth_table) {
3249 RTE_LOG(ERR, PMD, "Error reading NFP ethernet table\n");
3253 if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) {
3254 RTE_LOG(INFO, PMD, "Error when uploading firmware\n");
3259 /* Now the symbol table should be there */
3260 sym_tbl = nfp_rtsym_table_read(cpp);
3262 RTE_LOG(ERR, PMD, "Something is wrong with the firmware"
3268 total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
3269 if (total_ports != (int)nfp_eth_table->count) {
3270 RTE_LOG(ERR, PMD, "Inconsistent number of ports\n");
3274 PMD_INIT_LOG(INFO, "Total pf ports: %d\n", total_ports);
3276 if (total_ports <= 0 || total_ports > 8) {
3277 RTE_LOG(ERR, PMD, "nfd_cfg_pf0_num_ports symbol with wrong value");
3282 for (i = 0; i < total_ports; i++) {
3283 ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo,
3284 nfp_eth_table->ports[i].index,
3291 free(nfp_eth_table);
3295 int nfp_logtype_init;
3296 int nfp_logtype_driver;
3298 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
3300 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3301 PCI_DEVICE_ID_NFP4000_PF_NIC)
3304 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3305 PCI_DEVICE_ID_NFP6000_PF_NIC)
3312 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
3314 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3315 PCI_DEVICE_ID_NFP6000_VF_NIC)
3322 static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3323 struct rte_pci_device *pci_dev)
3325 return rte_eth_dev_pci_generic_probe(pci_dev,
3326 sizeof(struct nfp_net_adapter), nfp_net_init);
3329 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
3331 struct rte_eth_dev *eth_dev;
3332 struct nfp_net_hw *hw, *hwport0;
3335 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
3336 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
3337 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
3338 port = get_pf_port_number(eth_dev->data->name);
3340 * Hotplug is not possible with a multiport PF, although freeing
3341 * data structures can be done for the first port.
3345 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3346 hw = &hwport0[port];
3347 nfp_cpp_area_free(hw->ctrl_area);
3348 nfp_cpp_area_free(hw->hwqueues_area);
3351 nfp_cpp_free(hw->cpp);
3353 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3355 /* hotplug is not possible with multiport PF */
3356 if (hw->pf_multiport_enabled)
3358 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
3361 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
3362 .id_table = pci_id_nfp_pf_net_map,
3363 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3364 .probe = nfp_pf_pci_probe,
3365 .remove = eth_nfp_pci_remove,
3368 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
3369 .id_table = pci_id_nfp_vf_net_map,
3370 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3371 .probe = eth_nfp_pci_probe,
3372 .remove = eth_nfp_pci_remove,
3375 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
3376 RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
3377 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
3378 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
3379 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
3380 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
3382 RTE_INIT(nfp_init_log);
3386 nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
3387 if (nfp_logtype_init >= 0)
3388 rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
3389 nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
3390 if (nfp_logtype_driver >= 0)
3391 rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
3395 * c-file-style: "Linux"
3396 * indent-tabs-mode: t