1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2014-2018 Netronome Systems, Inc.
5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
9 * vim:shiftwidth=8:noexpandtab
11 * @file dpdk/pmd/nfp_net.c
13 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
19 #include <rte_debug.h>
20 #include <ethdev_driver.h>
21 #include <ethdev_pci.h>
23 #include <rte_ether.h>
24 #include <rte_malloc.h>
25 #include <rte_memzone.h>
26 #include <rte_mempool.h>
27 #include <rte_version.h>
28 #include <rte_string_fns.h>
29 #include <rte_alarm.h>
30 #include <rte_spinlock.h>
31 #include <rte_service_component.h>
33 #include "eal_firmware.h"
35 #include "nfpcore/nfp_cpp.h"
36 #include "nfpcore/nfp_nffw.h"
37 #include "nfpcore/nfp_hwinfo.h"
38 #include "nfpcore/nfp_mip.h"
39 #include "nfpcore/nfp_rtsym.h"
40 #include "nfpcore/nfp_nsp.h"
42 #include "nfp_net_pmd.h"
44 #include "nfp_net_logs.h"
45 #include "nfp_net_ctrl.h"
47 #include <sys/types.h>
48 #include <sys/socket.h>
52 #include <sys/ioctl.h>
56 static int nfp_net_close(struct rte_eth_dev *dev);
57 static int nfp_net_configure(struct rte_eth_dev *dev);
58 static void nfp_net_dev_interrupt_handler(void *param);
59 static void nfp_net_dev_interrupt_delayed_handler(void *param);
60 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
61 static int nfp_net_infos_get(struct rte_eth_dev *dev,
62 struct rte_eth_dev_info *dev_info);
63 static int nfp_net_init(struct rte_eth_dev *eth_dev);
64 static int nfp_pf_init(struct rte_pci_device *pci_dev);
65 static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev);
66 static int nfp_pci_uninit(struct rte_eth_dev *eth_dev);
67 static int nfp_init_phyports(struct nfp_pf_dev *pf_dev);
68 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
69 static int nfp_net_promisc_enable(struct rte_eth_dev *dev);
70 static int nfp_net_promisc_disable(struct rte_eth_dev *dev);
71 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
72 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
74 static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
76 static void nfp_net_rx_queue_release(void *rxq);
77 static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
78 uint16_t nb_desc, unsigned int socket_id,
79 const struct rte_eth_rxconf *rx_conf,
80 struct rte_mempool *mp);
81 static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
82 static void nfp_net_tx_queue_release(void *txq);
83 static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
84 uint16_t nb_desc, unsigned int socket_id,
85 const struct rte_eth_txconf *tx_conf);
86 static int nfp_net_start(struct rte_eth_dev *dev);
87 static int nfp_net_stats_get(struct rte_eth_dev *dev,
88 struct rte_eth_stats *stats);
89 static int nfp_net_stats_reset(struct rte_eth_dev *dev);
90 static int nfp_net_stop(struct rte_eth_dev *dev);
91 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
94 static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
95 static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
96 struct rte_eth_rss_conf *rss_conf);
97 static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
98 struct rte_eth_rss_reta_entry64 *reta_conf,
100 static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
101 struct rte_eth_rss_conf *rss_conf);
102 static int nfp_set_mac_addr(struct rte_eth_dev *dev,
103 struct rte_ether_addr *mac_addr);
104 static int32_t nfp_cpp_bridge_service_func(void *args);
105 static void nfp_register_cpp_service(struct nfp_cpp *cpp);
106 static int nfp_fw_setup(struct rte_pci_device *dev,
108 struct nfp_eth_table *nfp_eth_table,
109 struct nfp_hwinfo *hwinfo);
112 /* The offset of the queue controller queues in the PCIe Target */
113 #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
115 /* Maximum value which can be added to a queue with one transaction */
116 #define NFP_QCP_MAX_ADD 0x7f
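/*
 * Default DMA address of an mbuf's data area: the buffer IOVA plus the
 * standard headroom. This is the value programmed into the RX freelist
 * descriptors below.
 */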
118 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
119 (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
121 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
123 NFP_QCP_READ_PTR = 0,
128 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
129 * @q: Base address for queue structure
130 * @ptr: Add to the Read or Write pointer
131 * @val: Value to add to the queue pointer
133 * If @val is greater than @NFP_QCP_MAX_ADD, multiple writes are performed.
136 nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
140 if (ptr == NFP_QCP_READ_PTR)
141 off = NFP_QCP_QUEUE_ADD_RPTR;
143 off = NFP_QCP_QUEUE_ADD_WPTR;
145 while (val > NFP_QCP_MAX_ADD) {
146 nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
147 val -= NFP_QCP_MAX_ADD;
150 nn_writel(rte_cpu_to_le_32(val), q + off);
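/*
 * Example: adding 200 to a queue pointer issues two writes, first
 * NFP_QCP_MAX_ADD (0x7f = 127) and then the remaining 73, since a single
 * transaction can only add up to NFP_QCP_MAX_ADD.
 */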
154 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
155 * @q: Base address for queue structure
156 * @ptr: Read or Write pointer
158 static inline uint32_t
159 nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
164 if (ptr == NFP_QCP_READ_PTR)
165 off = NFP_QCP_QUEUE_STS_LO;
167 off = NFP_QCP_QUEUE_STS_HI;
169 val = rte_le_to_cpu_32(nn_readl(q + off));
171 if (ptr == NFP_QCP_READ_PTR)
172 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
174 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
178 * Functions to read/write from/to Config BAR
179 * Performs any endian conversion necessary.
181 static inline uint8_t
182 nn_cfg_readb(struct nfp_net_hw *hw, int off)
184 return nn_readb(hw->ctrl_bar + off);
188 nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
190 nn_writeb(val, hw->ctrl_bar + off);
193 static inline uint32_t
194 nn_cfg_readl(struct nfp_net_hw *hw, int off)
196 return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
200 nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
202 nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
205 static inline uint64_t
206 nn_cfg_readq(struct nfp_net_hw *hw, int off)
208 return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
212 nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
214 nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
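/*
 * Illustrative use of these helpers (a sketch of what device init does,
 * assuming the usual control BAR layout from nfp_net_ctrl.h):
 *
 *	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 *	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
 */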
218 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
222 if (rxq->rxbufs == NULL)
225 for (i = 0; i < rxq->rx_count; i++) {
226 if (rxq->rxbufs[i].mbuf) {
227 rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
228 rxq->rxbufs[i].mbuf = NULL;
234 nfp_net_rx_queue_release(void *rx_queue)
236 struct nfp_net_rxq *rxq = rx_queue;
239 nfp_net_rx_queue_release_mbufs(rxq);
240 rte_free(rxq->rxbufs);
246 nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
248 nfp_net_rx_queue_release_mbufs(rxq);
254 nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
258 if (txq->txbufs == NULL)
261 for (i = 0; i < txq->tx_count; i++) {
262 if (txq->txbufs[i].mbuf) {
263 rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
264 txq->txbufs[i].mbuf = NULL;
270 nfp_net_tx_queue_release(void *tx_queue)
272 struct nfp_net_txq *txq = tx_queue;
275 nfp_net_tx_queue_release_mbufs(txq);
276 rte_free(txq->txbufs);
282 nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
284 nfp_net_tx_queue_release_mbufs(txq);
290 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
294 struct timespec wait;
296 PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
299 if (hw->qcp_cfg == NULL)
300 rte_panic("Bad configuration queue pointer\n");
302 nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
305 wait.tv_nsec = 1000000;
307 PMD_DRV_LOG(DEBUG, "Polling for update ack...");
309 /* Poll update field, waiting for NFP to ack the config */
310 for (cnt = 0; ; cnt++) {
311 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
314 if (new & NFP_NET_CFG_UPDATE_ERR) {
315 PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
318 if (cnt >= NFP_NET_POLL_TIMEOUT) {
319 PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
320 " %dms", update, cnt);
321 rte_panic("Exiting\n");
323 nanosleep(&wait, 0); /* wait for 1 ms */
325 PMD_DRV_LOG(DEBUG, "Ack DONE");
330 * Reconfigure the NIC
331 * @nn: device to reconfigure
332 * @ctrl: The value for the ctrl field in the BAR config
333 * @update: The value for the update field in the BAR config
335 * Write the update word to the BAR and ping the reconfig queue. Then poll
336 * until the firmware has acknowledged the update by zeroing the update word.
339 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
343 PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
346 rte_spinlock_lock(&hw->reconfig_lock);
348 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
349 nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
353 err = __nfp_net_reconfig(hw, update);
355 rte_spinlock_unlock(&hw->reconfig_lock);
361 * Reconfig errors that reach this point are ones the caller can handle;
362 * unrecoverable ones trigger rte_panic() inside __nfp_net_reconfig().
364 PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
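/*
 * Typical caller pattern (sketch only, mirroring nfp_net_promisc_enable()
 * further down in this file):
 *
 *	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 *	if (nfp_net_reconfig(hw, new_ctrl, NFP_NET_CFG_UPDATE_GEN) < 0)
 *		return -EIO;		(hw->ctrl is left untouched on failure)
 *	hw->ctrl = new_ctrl;		(commit the new control word)
 */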
370 * Configure an Ethernet device. This function must be invoked first
371 * before any other function in the Ethernet API. This function can
372 * also be re-invoked when a device is in the stopped state.
375 nfp_net_configure(struct rte_eth_dev *dev)
377 struct rte_eth_conf *dev_conf;
378 struct rte_eth_rxmode *rxmode;
379 struct rte_eth_txmode *txmode;
380 struct nfp_net_hw *hw;
382 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
385 * The DPDK application tells the ethdev layer how many queues to use
386 * and how those queues need to be configured. The ethdev core uses
387 * that information to make sure no more queues than those advertised
388 * by the driver are requested. This function is called after that
389 * check has already been done.
392 PMD_INIT_LOG(DEBUG, "Configure");
394 dev_conf = &dev->data->dev_conf;
395 rxmode = &dev_conf->rxmode;
396 txmode = &dev_conf->txmode;
398 if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
399 rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
401 /* Checking TX mode */
402 if (txmode->mq_mode) {
403 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
407 /* Checking RX mode */
408 if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
409 !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
410 PMD_INIT_LOG(INFO, "RSS not supported");
418 nfp_net_enable_queues(struct rte_eth_dev *dev)
420 struct nfp_net_hw *hw;
421 uint64_t enabled_queues = 0;
424 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
426 /* Enabling the required TX queues in the device */
427 for (i = 0; i < dev->data->nb_tx_queues; i++)
428 enabled_queues |= (1 << i);
430 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
434 /* Enabling the required RX queues in the device */
435 for (i = 0; i < dev->data->nb_rx_queues; i++)
436 enabled_queues |= (1 << i);
438 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
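/*
 * Example: with 4 RX queues configured the loop above builds the mask 0xf,
 * i.e. ring-enable bits 0-3 set in NFP_NET_CFG_RXRS_ENABLE.
 */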
442 nfp_net_disable_queues(struct rte_eth_dev *dev)
444 struct nfp_net_hw *hw;
445 uint32_t new_ctrl, update = 0;
447 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
449 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
450 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
452 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
453 update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
454 NFP_NET_CFG_UPDATE_MSIX;
456 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
457 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
459 /* If reconfig fails, avoid changing the hw state */
460 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
467 nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
471 for (i = 0; i < dev->data->nb_rx_queues; i++) {
472 if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
479 nfp_net_params_setup(struct nfp_net_hw *hw)
481 nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
482 nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
486 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
488 hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
491 #define ETH_ADDR_LEN 6
494 nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
498 for (i = 0; i < ETH_ADDR_LEN; i++)
499 dst[i] = src[i];
503 nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port)
505 struct nfp_eth_table *nfp_eth_table;
506 struct nfp_net_hw *hw = NULL;
508 /* Grab a pointer to the correct physical port */
509 hw = pf_dev->ports[port];
511 nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
513 nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
514 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
521 nfp_net_vf_read_mac(struct nfp_net_hw *hw)
525 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
526 memcpy(&hw->mac_addr[0], &tmp, 4);
528 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
529 memcpy(&hw->mac_addr[4], &tmp, 2);
533 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
535 uint32_t mac0 = *(uint32_t *)mac;
536 uint16_t mac1;
538 nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
540 mac += 4;
541 mac1 = *(uint16_t *)mac;
542 nn_writew(rte_cpu_to_be_16(mac1),
543 hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
547 nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
549 struct nfp_net_hw *hw;
550 uint32_t update, ctrl;
552 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
553 if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
554 !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
555 PMD_INIT_LOG(INFO, "MAC address unable to change when"
560 if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
561 !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
564 /* Writing new MAC to the specific port BAR address */
565 nfp_net_write_mac(hw, (uint8_t *)mac_addr);
567 /* Signal the NIC about the change */
568 update = NFP_NET_CFG_UPDATE_MACADDR;
570 if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
571 (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
572 ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
573 if (nfp_net_reconfig(hw, ctrl, update) < 0) {
574 PMD_INIT_LOG(INFO, "MAC address update failed");
581 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
582 struct rte_intr_handle *intr_handle)
584 struct nfp_net_hw *hw;
587 if (!intr_handle->intr_vec) {
588 intr_handle->intr_vec =
589 rte_zmalloc("intr_vec",
590 dev->data->nb_rx_queues * sizeof(int), 0);
591 if (!intr_handle->intr_vec) {
592 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
593 " intr_vec", dev->data->nb_rx_queues);
598 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
600 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
601 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
602 /* UIO just supports one queue and no LSC */
603 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
604 intr_handle->intr_vec[0] = 0;
606 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
607 for (i = 0; i < dev->data->nb_rx_queues; i++) {
609 * The first MSI-X vector is reserved for non-RX interrupts, so RX queue i uses vector i + 1.
612 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
613 intr_handle->intr_vec[i] = i + 1;
614 PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
615 intr_handle->intr_vec[i]);
619 /* Avoiding TX interrupts */
620 hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
625 nfp_check_offloads(struct rte_eth_dev *dev)
627 struct nfp_net_hw *hw;
628 struct rte_eth_conf *dev_conf;
629 struct rte_eth_rxmode *rxmode;
630 struct rte_eth_txmode *txmode;
633 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
635 dev_conf = &dev->data->dev_conf;
636 rxmode = &dev_conf->rxmode;
637 txmode = &dev_conf->txmode;
639 if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
640 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
641 ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
644 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
645 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
646 ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
649 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
650 hw->mtu = rxmode->max_rx_pkt_len;
652 if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
653 ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
656 if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
657 ctrl |= NFP_NET_CFG_CTRL_L2BC;
660 if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
661 ctrl |= NFP_NET_CFG_CTRL_L2MC;
663 /* TX checksum offload */
664 if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
665 txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
666 txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
667 ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
670 if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
671 if (hw->cap & NFP_NET_CFG_CTRL_LSO)
672 ctrl |= NFP_NET_CFG_CTRL_LSO;
674 ctrl |= NFP_NET_CFG_CTRL_LSO2;
678 if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
679 ctrl |= NFP_NET_CFG_CTRL_GATHER;
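/*
 * The ctrl word assembled here is only a proposal: nfp_net_start() merges it
 * with the RSS, ring and enable bits and pushes the result to the firmware
 * through nfp_net_reconfig().
 */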
685 nfp_net_start(struct rte_eth_dev *dev)
687 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
688 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
689 uint32_t new_ctrl, update = 0;
690 struct nfp_net_hw *hw;
691 struct nfp_pf_dev *pf_dev;
692 struct rte_eth_conf *dev_conf;
693 struct rte_eth_rxmode *rxmode;
694 uint32_t intr_vector;
697 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
698 pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
700 PMD_INIT_LOG(DEBUG, "Start");
702 /* Disabling queues just in case... */
703 nfp_net_disable_queues(dev);
705 /* Enabling the required queues in the device */
706 nfp_net_enable_queues(dev);
708 /* check and configure queue intr-vector mapping */
709 if (dev->data->dev_conf.intr_conf.rxq != 0) {
710 if (pf_dev->multiport) {
711 PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
712 "with NFP multiport PF");
715 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
717 * Better not to share LSC with RX interrupts.
718 * Unregistering LSC interrupt handler
720 rte_intr_callback_unregister(&pci_dev->intr_handle,
721 nfp_net_dev_interrupt_handler, (void *)dev);
723 if (dev->data->nb_rx_queues > 1) {
724 PMD_INIT_LOG(ERR, "PMD rx interrupt only "
725 "supports 1 queue with UIO");
729 intr_vector = dev->data->nb_rx_queues;
730 if (rte_intr_efd_enable(intr_handle, intr_vector))
733 nfp_configure_rx_interrupt(dev, intr_handle);
734 update = NFP_NET_CFG_UPDATE_MSIX;
737 rte_intr_enable(intr_handle);
739 new_ctrl = nfp_check_offloads(dev);
741 /* Writing configuration parameters in the device */
742 nfp_net_params_setup(hw);
744 dev_conf = &dev->data->dev_conf;
745 rxmode = &dev_conf->rxmode;
747 if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
748 nfp_net_rss_config_default(dev);
749 update |= NFP_NET_CFG_UPDATE_RSS;
750 new_ctrl |= NFP_NET_CFG_CTRL_RSS;
754 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
756 update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
758 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
759 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
761 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
762 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
766 * Allocating rte mbufs for configured rx queues.
767 * This requires queues being enabled before
769 if (nfp_net_rx_freelist_setup(dev) < 0) {
774 if (hw->is_phyport) {
775 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
776 /* Configure the physical port up */
777 nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
779 nfp_eth_set_configured(dev->process_private,
789 * An error returned by this function normally means the app will exit
790 * and the system will then release all the allocated memory, including
791 * memory coming from hugepages.
793 * The device could already be enabled at this point, with some queues
794 * ready for getting packets. This happens if the call to
795 * nfp_net_rx_freelist_setup() succeeds for some queues but fails for
796 * a subsequent one.
798 * Either way, disable the queues so the app is not left with that side effect.
801 nfp_net_disable_queues(dev);
806 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
808 nfp_net_stop(struct rte_eth_dev *dev)
811 struct nfp_net_hw *hw;
813 PMD_INIT_LOG(DEBUG, "Stop");
815 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
817 nfp_net_disable_queues(dev);
820 for (i = 0; i < dev->data->nb_tx_queues; i++) {
821 nfp_net_reset_tx_queue(
822 (struct nfp_net_txq *)dev->data->tx_queues[i]);
825 for (i = 0; i < dev->data->nb_rx_queues; i++) {
826 nfp_net_reset_rx_queue(
827 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
830 if (hw->is_phyport) {
831 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
832 /* Configure the physical port down */
833 nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
835 nfp_eth_set_configured(dev->process_private,
842 /* Set the link up. */
844 nfp_net_set_link_up(struct rte_eth_dev *dev)
846 struct nfp_net_hw *hw;
848 PMD_DRV_LOG(DEBUG, "Set link up");
850 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
855 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
856 /* Configure the physical port up */
857 return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
859 return nfp_eth_set_configured(dev->process_private,
863 /* Set the link down. */
865 nfp_net_set_link_down(struct rte_eth_dev *dev)
867 struct nfp_net_hw *hw;
869 PMD_DRV_LOG(DEBUG, "Set link down");
871 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
876 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
877 /* Configure the physical port down */
878 return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
880 return nfp_eth_set_configured(dev->process_private,
884 /* Reset and stop device. The device can not be restarted. */
886 nfp_net_close(struct rte_eth_dev *dev)
888 struct nfp_net_hw *hw;
889 struct rte_pci_device *pci_dev;
892 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
895 PMD_INIT_LOG(DEBUG, "Close");
897 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
898 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
901 * We assume that the DPDK application is stopping all the
902 * threads/queues before calling the device close function.
905 nfp_net_disable_queues(dev);
908 for (i = 0; i < dev->data->nb_tx_queues; i++) {
909 nfp_net_reset_tx_queue(
910 (struct nfp_net_txq *)dev->data->tx_queues[i]);
913 for (i = 0; i < dev->data->nb_rx_queues; i++) {
914 nfp_net_reset_rx_queue(
915 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
918 /* Only free PF resources after all physical ports have been closed */
919 if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC ||
920 pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) {
921 struct nfp_pf_dev *pf_dev;
922 pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
924 /* Mark this port as unused and free device priv resources */
925 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
926 pf_dev->ports[hw->idx] = NULL;
927 rte_eth_dev_release_port(dev);
929 for (i = 0; i < pf_dev->total_phyports; i++) {
930 /* Check to see if ports are still in use */
931 if (pf_dev->ports[i])
935 /* Now it is safe to free all PF resources */
936 PMD_INIT_LOG(INFO, "Freeing PF resources");
937 nfp_cpp_area_free(pf_dev->ctrl_area);
938 nfp_cpp_area_free(pf_dev->hwqueues_area);
939 free(pf_dev->hwinfo);
940 free(pf_dev->sym_tbl);
941 nfp_cpp_free(pf_dev->cpp);
945 rte_intr_disable(&pci_dev->intr_handle);
947 /* unregister callback func from eal lib */
948 rte_intr_callback_unregister(&pci_dev->intr_handle,
949 nfp_net_dev_interrupt_handler,
953 * The ixgbe PMD disables the PCIe bus master on the
954 * device. The i40e PMD does not...
961 nfp_net_promisc_enable(struct rte_eth_dev *dev)
963 uint32_t new_ctrl, update = 0;
964 struct nfp_net_hw *hw;
967 PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
969 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
971 if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
972 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
976 if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
977 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
981 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
982 update = NFP_NET_CFG_UPDATE_GEN;
985 * DPDK sets promiscuous mode on just after this call, assuming
986 * it cannot fail ...
988 ret = nfp_net_reconfig(hw, new_ctrl, update);
998 nfp_net_promisc_disable(struct rte_eth_dev *dev)
1000 uint32_t new_ctrl, update = 0;
1001 struct nfp_net_hw *hw;
1004 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1006 if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
1007 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
1011 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
1012 update = NFP_NET_CFG_UPDATE_GEN;
1015 * DPDK sets promiscuous mode off just before this call,
1016 * assuming it cannot fail ...
1018 ret = nfp_net_reconfig(hw, new_ctrl, update);
1022 hw->ctrl = new_ctrl;
1028 * return 0 means link status changed, -1 means not changed
1030 * Wait to complete is needed as it can take up to 9 seconds to get the link status.
1034 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
1036 struct nfp_net_hw *hw;
1037 struct rte_eth_link link;
1038 uint32_t nn_link_status;
1041 static const uint32_t ls_to_ethtool[] = {
1042 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
1043 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE,
1044 [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G,
1045 [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G,
1046 [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G,
1047 [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G,
1048 [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G,
1049 [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
1052 PMD_DRV_LOG(DEBUG, "Link update");
1054 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1056 nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
1058 memset(&link, 0, sizeof(struct rte_eth_link));
1060 if (nn_link_status & NFP_NET_CFG_STS_LINK)
1061 link.link_status = ETH_LINK_UP;
1063 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1065 nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
1066 NFP_NET_CFG_STS_LINK_RATE_MASK;
1068 if (nn_link_status >= RTE_DIM(ls_to_ethtool))
1069 link.link_speed = ETH_SPEED_NUM_NONE;
1071 link.link_speed = ls_to_ethtool[nn_link_status];
1073 ret = rte_eth_linkstatus_set(dev, &link);
1075 if (link.link_status)
1076 PMD_DRV_LOG(INFO, "NIC Link is Up");
1078 PMD_DRV_LOG(INFO, "NIC Link is Down");
1084 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1087 struct nfp_net_hw *hw;
1088 struct rte_eth_stats nfp_dev_stats;
1090 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1092 /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
1094 memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
1096 /* reading per RX ring stats */
1097 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1098 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1101 nfp_dev_stats.q_ipackets[i] =
1102 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1104 nfp_dev_stats.q_ipackets[i] -=
1105 hw->eth_stats_base.q_ipackets[i];
1107 nfp_dev_stats.q_ibytes[i] =
1108 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1110 nfp_dev_stats.q_ibytes[i] -=
1111 hw->eth_stats_base.q_ibytes[i];
1114 /* reading per TX ring stats */
1115 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1116 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1119 nfp_dev_stats.q_opackets[i] =
1120 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1122 nfp_dev_stats.q_opackets[i] -=
1123 hw->eth_stats_base.q_opackets[i];
1125 nfp_dev_stats.q_obytes[i] =
1126 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1128 nfp_dev_stats.q_obytes[i] -=
1129 hw->eth_stats_base.q_obytes[i];
1132 nfp_dev_stats.ipackets =
1133 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1135 nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
1137 nfp_dev_stats.ibytes =
1138 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1140 nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
1142 nfp_dev_stats.opackets =
1143 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1145 nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
1147 nfp_dev_stats.obytes =
1148 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1150 nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
1152 /* reading general device stats */
1153 nfp_dev_stats.ierrors =
1154 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1156 nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
1158 nfp_dev_stats.oerrors =
1159 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1161 nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
1163 /* RX ring mbuf allocation failures */
1164 nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1166 nfp_dev_stats.imissed =
1167 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1169 nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
1172 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
1179 nfp_net_stats_reset(struct rte_eth_dev *dev)
1182 struct nfp_net_hw *hw;
1184 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1187 * hw->eth_stats_base records the per counter starting point.
1188 * Let's update it now.
1191 /* reading per RX ring stats */
1192 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1193 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1196 hw->eth_stats_base.q_ipackets[i] =
1197 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1199 hw->eth_stats_base.q_ibytes[i] =
1200 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1203 /* reading per TX ring stats */
1204 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1205 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1208 hw->eth_stats_base.q_opackets[i] =
1209 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1211 hw->eth_stats_base.q_obytes[i] =
1212 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1215 hw->eth_stats_base.ipackets =
1216 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1218 hw->eth_stats_base.ibytes =
1219 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1221 hw->eth_stats_base.opackets =
1222 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1224 hw->eth_stats_base.obytes =
1225 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1227 /* reading general device stats */
1228 hw->eth_stats_base.ierrors =
1229 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1231 hw->eth_stats_base.oerrors =
1232 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1234 /* RX ring mbuf allocation failures */
1235 dev->data->rx_mbuf_alloc_failed = 0;
1237 hw->eth_stats_base.imissed =
1238 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1244 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1246 struct nfp_net_hw *hw;
1248 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1250 dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1251 dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1252 dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1253 dev_info->max_rx_pktlen = hw->max_mtu;
1254 /* Next should change when PF support is implemented */
1255 dev_info->max_mac_addrs = 1;
1257 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
1258 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1260 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
1261 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
1262 DEV_RX_OFFLOAD_UDP_CKSUM |
1263 DEV_RX_OFFLOAD_TCP_CKSUM;
1265 if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
1266 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
1268 if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
1269 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
1270 DEV_TX_OFFLOAD_UDP_CKSUM |
1271 DEV_TX_OFFLOAD_TCP_CKSUM;
1273 if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
1274 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
1276 if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
1277 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
1279 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1281 .pthresh = DEFAULT_RX_PTHRESH,
1282 .hthresh = DEFAULT_RX_HTHRESH,
1283 .wthresh = DEFAULT_RX_WTHRESH,
1285 .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1289 dev_info->default_txconf = (struct rte_eth_txconf) {
1291 .pthresh = DEFAULT_TX_PTHRESH,
1292 .hthresh = DEFAULT_TX_HTHRESH,
1293 .wthresh = DEFAULT_TX_WTHRESH,
1295 .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1296 .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1299 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1300 .nb_max = NFP_NET_MAX_RX_DESC,
1301 .nb_min = NFP_NET_MIN_RX_DESC,
1302 .nb_align = NFP_ALIGN_RING_DESC,
1305 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1306 .nb_max = NFP_NET_MAX_TX_DESC,
1307 .nb_min = NFP_NET_MIN_TX_DESC,
1308 .nb_align = NFP_ALIGN_RING_DESC,
1309 .nb_seg_max = NFP_TX_MAX_SEG,
1310 .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
1313 /* All NFP devices support jumbo frames */
1314 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1316 if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
1317 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
1319 dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1320 ETH_RSS_NONFRAG_IPV4_TCP |
1321 ETH_RSS_NONFRAG_IPV4_UDP |
1323 ETH_RSS_NONFRAG_IPV6_TCP |
1324 ETH_RSS_NONFRAG_IPV6_UDP;
1326 dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1327 dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1330 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1331 ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
1332 ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
1337 static const uint32_t *
1338 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
1340 static const uint32_t ptypes[] = {
1341 /* refers to nfp_net_set_hash() */
1342 RTE_PTYPE_INNER_L3_IPV4,
1343 RTE_PTYPE_INNER_L3_IPV6,
1344 RTE_PTYPE_INNER_L3_IPV6_EXT,
1345 RTE_PTYPE_INNER_L4_MASK,
1349 if (dev->rx_pkt_burst == nfp_net_recv_pkts)
1355 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
1357 struct nfp_net_rxq *rxq;
1358 struct nfp_net_rx_desc *rxds;
1362 rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
1369 * Other PMDs just check the DD bit in intervals of 4
1370 * descriptors and count all four if the first one has the DD
1371 * bit on. Of course, this is not accurate but can be good for
1372 * performance. Ideally, though, that should be done on chunks of
1373 * descriptors belonging to the same cache line.
1376 while (count < rxq->rx_count) {
1377 rxds = &rxq->rxds[idx];
1378 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
1385 if ((idx) == rxq->rx_count)
1393 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1395 struct rte_pci_device *pci_dev;
1396 struct nfp_net_hw *hw;
1399 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1400 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1402 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1405 /* Make sure all updates are written before un-masking */
1407 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
1408 NFP_NET_CFG_ICR_UNMASKED);
1413 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1415 struct rte_pci_device *pci_dev;
1416 struct nfp_net_hw *hw;
1419 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1420 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1422 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1425 /* Make sure all updates are written before un-masking */
1427 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
1432 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1434 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1435 struct rte_eth_link link;
1437 rte_eth_linkstatus_get(dev, &link);
1438 if (link.link_status)
1439 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1440 dev->data->port_id, link.link_speed,
1441 link.link_duplex == ETH_LINK_FULL_DUPLEX
1442 ? "full-duplex" : "half-duplex");
1444 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1445 dev->data->port_id);
1447 PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
1448 pci_dev->addr.domain, pci_dev->addr.bus,
1449 pci_dev->addr.devid, pci_dev->addr.function);
1452 /* Interrupt configuration and handling */
1455 * nfp_net_irq_unmask - Unmask an interrupt
1457 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1458 * clear the ICR for the entry.
1461 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1463 struct nfp_net_hw *hw;
1464 struct rte_pci_device *pci_dev;
1466 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1467 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1469 if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
1470 /* If MSI-X auto-masking is used, clear the entry */
1472 rte_intr_ack(&pci_dev->intr_handle);
1474 /* Make sure all updates are written before un-masking */
1476 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1477 NFP_NET_CFG_ICR_UNMASKED);
1482 nfp_net_dev_interrupt_handler(void *param)
1485 struct rte_eth_link link;
1486 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1488 PMD_DRV_LOG(DEBUG, "We got an LSC interrupt");
1490 rte_eth_linkstatus_get(dev, &link);
1492 nfp_net_link_update(dev, 0);
1495 if (!link.link_status) {
1496 /* handle it 1 sec later; wait for the link state to stabilize */
1497 timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1498 /* link is likely going down */
1500 /* handle it 4 sec later; wait for the link state to stabilize */
1501 timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1504 if (rte_eal_alarm_set(timeout * 1000,
1505 nfp_net_dev_interrupt_delayed_handler,
1507 PMD_INIT_LOG(ERR, "Error setting alarm");
1509 nfp_net_irq_unmask(dev);
1514 * Interrupt handler registered as an alarm callback for delayed handling of
1515 * a specific interrupt, used to wait for a stable NIC state. As the NFP
1516 * interrupt state is not stable right after the link goes down, it needs
1517 * to wait 4 seconds before a stable status can be read.
1520 * @param param The address of the parameter (struct rte_eth_dev *)
1525 nfp_net_dev_interrupt_delayed_handler(void *param)
1527 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1529 nfp_net_link_update(dev, 0);
1530 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1532 nfp_net_dev_link_status_print(dev);
1535 nfp_net_irq_unmask(dev);
1539 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1541 struct nfp_net_hw *hw;
1543 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1545 /* check that mtu is within the allowed range */
1546 if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)
1549 /* mtu setting is forbidden if port is started */
1550 if (dev->data->dev_started) {
1551 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1552 dev->data->port_id);
1556 /* switch to jumbo mode if needed */
1557 if ((uint32_t)mtu > RTE_ETHER_MTU)
1558 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1560 dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1562 /* update max frame size */
1563 dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
1565 /* writing to configuration space */
1566 nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
1574 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
1575 uint16_t queue_idx, uint16_t nb_desc,
1576 unsigned int socket_id,
1577 const struct rte_eth_rxconf *rx_conf,
1578 struct rte_mempool *mp)
1580 const struct rte_memzone *tz;
1581 struct nfp_net_rxq *rxq;
1582 struct nfp_net_hw *hw;
1583 uint32_t rx_desc_sz;
1585 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1587 PMD_INIT_FUNC_TRACE();
1589 /* Validating number of descriptors */
1590 rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc);
1591 if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
1592 nb_desc > NFP_NET_MAX_RX_DESC ||
1593 nb_desc < NFP_NET_MIN_RX_DESC) {
1594 PMD_DRV_LOG(ERR, "Wrong nb_desc value");
1599 * Free memory prior to re-allocation if needed. This is the case after
1600 * calling nfp_net_stop
1602 if (dev->data->rx_queues[queue_idx]) {
1603 nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
1604 dev->data->rx_queues[queue_idx] = NULL;
1607 /* Allocating rx queue data structure */
1608 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
1609 RTE_CACHE_LINE_SIZE, socket_id);
1613 /* Hw queues mapping based on firmware configuration */
1614 rxq->qidx = queue_idx;
1615 rxq->fl_qcidx = queue_idx * hw->stride_rx;
1616 rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
1617 rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
1618 rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
1621 * Tracking mbuf size for detecting a potential mbuf overflow due to
1622 * the RX offset
1624 rxq->mem_pool = mp;
1625 rxq->mbuf_size = rxq->mem_pool->elt_size;
1626 rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
1627 hw->flbufsz = rxq->mbuf_size;
1629 rxq->rx_count = nb_desc;
1630 rxq->port_id = dev->data->port_id;
1631 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1632 rxq->drop_en = rx_conf->rx_drop_en;
1635 * Allocate RX ring hardware descriptors. A memzone large enough to
1636 * handle the maximum ring size is allocated in order to allow for
1637 * resizing in later calls to the queue setup function.
1639 tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1640 sizeof(struct nfp_net_rx_desc) *
1641 NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
1645 PMD_DRV_LOG(ERR, "Error allocating rx dma");
1646 nfp_net_rx_queue_release(rxq);
1650 /* Saving physical and virtual addresses for the RX ring */
1651 rxq->dma = (uint64_t)tz->iova;
1652 rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
1654 /* mbuf pointers array for referencing mbufs linked to RX descriptors */
1655 rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
1656 sizeof(*rxq->rxbufs) * nb_desc,
1657 RTE_CACHE_LINE_SIZE, socket_id);
1658 if (rxq->rxbufs == NULL) {
1659 nfp_net_rx_queue_release(rxq);
1663 PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
1664 rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
1666 nfp_net_reset_rx_queue(rxq);
1668 dev->data->rx_queues[queue_idx] = rxq;
1672 * Telling the HW about the physical address of the RX ring and number
1673 * of descriptors in log2 format
1675 nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
1676 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
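/*
 * Example: with nb_desc = 1024 the ring size register gets
 * rte_log2_u32(1024) = 10, since the firmware expects the ring size in
 * log2 form.
 */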
1682 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
1684 struct nfp_net_rx_buff *rxe = rxq->rxbufs;
1688 PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
1691 for (i = 0; i < rxq->rx_count; i++) {
1692 struct nfp_net_rx_desc *rxd;
1693 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
1696 PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
1697 (unsigned)rxq->qidx);
1701 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
1703 rxd = &rxq->rxds[i];
1705 rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
1706 rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
1708 PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
1711 /* Make sure all writes are flushed before telling the hardware */
1714 /* Not advertising the whole ring as the firmware gets confused if so */
1715 PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
1718 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
1724 nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1725 uint16_t nb_desc, unsigned int socket_id,
1726 const struct rte_eth_txconf *tx_conf)
1728 const struct rte_memzone *tz;
1729 struct nfp_net_txq *txq;
1730 uint16_t tx_free_thresh;
1731 struct nfp_net_hw *hw;
1732 uint32_t tx_desc_sz;
1734 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1736 PMD_INIT_FUNC_TRACE();
1738 /* Validating number of descriptors */
1739 tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc);
1740 if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
1741 nb_desc > NFP_NET_MAX_TX_DESC ||
1742 nb_desc < NFP_NET_MIN_TX_DESC) {
1743 PMD_DRV_LOG(ERR, "Wrong nb_desc value");
1747 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1748 tx_conf->tx_free_thresh :
1749 DEFAULT_TX_FREE_THRESH);
1751 if (tx_free_thresh > (nb_desc)) {
1753 "tx_free_thresh must be less than the number of TX "
1754 "descriptors. (tx_free_thresh=%u port=%d "
1755 "queue=%d)", (unsigned int)tx_free_thresh,
1756 dev->data->port_id, (int)queue_idx);
1761 * Free memory prior to re-allocation if needed. This is the case after
1762 * calling nfp_net_stop
1764 if (dev->data->tx_queues[queue_idx]) {
1765 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1767 nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
1768 dev->data->tx_queues[queue_idx] = NULL;
1771 /* Allocating tx queue data structure */
1772 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
1773 RTE_CACHE_LINE_SIZE, socket_id);
1775 PMD_DRV_LOG(ERR, "Error allocating tx queue");
1780 * Allocate TX ring hardware descriptors. A memzone large enough to
1781 * handle the maximum ring size is allocated in order to allow for
1782 * resizing in later calls to the queue setup function.
1784 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1785 sizeof(struct nfp_net_tx_desc) *
1786 NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
1789 PMD_DRV_LOG(ERR, "Error allocating tx dma");
1790 nfp_net_tx_queue_release(txq);
1794 txq->tx_count = nb_desc;
1795 txq->tx_free_thresh = tx_free_thresh;
1796 txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
1797 txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
1798 txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
1800 /* queue mapping based on firmware configuration */
1801 txq->qidx = queue_idx;
1802 txq->tx_qcidx = queue_idx * hw->stride_tx;
1803 txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
1805 txq->port_id = dev->data->port_id;
1807 /* Saving physical and virtual addresses for the TX ring */
1808 txq->dma = (uint64_t)tz->iova;
1809 txq->txds = (struct nfp_net_tx_desc *)tz->addr;
1811 /* mbuf pointers array for referencing mbufs linked to TX descriptors */
1812 txq->txbufs = rte_zmalloc_socket("txq->txbufs",
1813 sizeof(*txq->txbufs) * nb_desc,
1814 RTE_CACHE_LINE_SIZE, socket_id);
1815 if (txq->txbufs == NULL) {
1816 nfp_net_tx_queue_release(txq);
1819 PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
1820 txq->txbufs, txq->txds, (unsigned long int)txq->dma);
1822 nfp_net_reset_tx_queue(txq);
1824 dev->data->tx_queues[queue_idx] = txq;
1828 * Telling the HW about the physical address of the TX ring and number
1829 * of descriptors in log2 format
1831 nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
1832 nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1837 /* nfp_net_tx_tso - Set TX descriptor for TSO */
1839 nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1840 struct rte_mbuf *mb)
1843 struct nfp_net_hw *hw = txq->hw;
1845 if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
1848 ol_flags = mb->ol_flags;
1850 if (!(ol_flags & PKT_TX_TCP_SEG))
1853 txd->l3_offset = mb->l2_len;
1854 txd->l4_offset = mb->l2_len + mb->l3_len;
1855 txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
1856 txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
1857 txd->flags = PCIE_DESC_TX_LSO;
1864 txd->lso_hdrlen = 0;
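/*
 * Example: for a TSO TCP/IPv4 packet with l2_len = 14, l3_len = 20 and
 * l4_len = 20, the descriptor gets l3_offset = 14, l4_offset = 34 and
 * lso_hdrlen = 54, with mss taken from mb->tso_segsz.
 */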
1868 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
1870 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1871 struct rte_mbuf *mb)
1874 struct nfp_net_hw *hw = txq->hw;
1876 if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
1879 ol_flags = mb->ol_flags;
1881 /* Set IPv4 header checksum offload; IPv6 has no header checksum */
1882 if (ol_flags & PKT_TX_IP_CKSUM)
1883 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
1885 switch (ol_flags & PKT_TX_L4_MASK) {
1886 case PKT_TX_UDP_CKSUM:
1887 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
1889 case PKT_TX_TCP_CKSUM:
1890 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
1894 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
1895 txd->flags |= PCIE_DESC_TX_CSUM;
1898 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
1900 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1901 struct rte_mbuf *mb)
1903 struct nfp_net_hw *hw = rxq->hw;
1905 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
1908 /* If IPv4 and IP checksum error, fail */
1909 if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
1910 !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
1911 mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1913 mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1915 /* If neither UDP nor TCP return */
1916 if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1917 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
1920 if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
1921 mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1923 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1926 #define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
1927 #define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
1929 #define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
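/*
 * NFP_HASH_OFFSET and NFP_HASH_TYPE_OFFSET point at the RSS hash value and
 * hash type that older firmware places right before the packet data, while
 * NFP_DESC_META_LEN() extracts the length (in bytes) of the prepended
 * metadata from the low bits of the descriptor's meta_len_dd field.
 */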
1932 * nfp_net_set_hash - Set mbuf hash data
1934 * The RSS hash and hash-type are pre-pended to the packet data.
1935 * Extract and decode it and set the mbuf fields.
1938 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1939 struct rte_mbuf *mbuf)
1941 struct nfp_net_hw *hw = rxq->hw;
1942 uint8_t *meta_offset;
1945 uint32_t hash_type = 0;
1947 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1950 /* this is true for new firmwares */
1951 if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
1952 (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
1953 NFP_DESC_META_LEN(rxd))) {
1956 * New metadata layout, prepended to the packet data: a 32-bit
1961 * field-type word followed by up to eight 32-bit data fields.
1964 * The field type word contains up to 8 4-bit field types.
1965 * A 4-bit field type refers to a data field word.
1966 * A data field word can have several 4-bit field types.
1968 meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
1969 meta_offset -= NFP_DESC_META_LEN(rxd);
1970 meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1971 meta_offset += 4;
1972 /* NFP PMD just supports metadata for hashing */
1973 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1974 case NFP_NET_META_HASH:
1975 /* next field type is about the hash type */
1976 meta_info >>= NFP_NET_META_FIELD_SIZE;
1977 /* hash value is in the data field */
1978 hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1979 hash_type = meta_info & NFP_NET_META_FIELD_MASK;
1982 /* Unsupported metadata can be a performance issue */
1986 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1989 hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
1990 hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
1993 mbuf->hash.rss = hash;
1994 mbuf->ol_flags |= PKT_RX_RSS_HASH;
1996 switch (hash_type) {
1997 case NFP_NET_RSS_IPV4:
1998 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
2000 case NFP_NET_RSS_IPV6:
2001 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
2003 case NFP_NET_RSS_IPV6_EX:
2004 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
2006 case NFP_NET_RSS_IPV4_TCP:
2007 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
2009 case NFP_NET_RSS_IPV6_TCP:
2010 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
2012 case NFP_NET_RSS_IPV4_UDP:
2013 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
2015 case NFP_NET_RSS_IPV6_UDP:
2016 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
2019 mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
2024 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
2026 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
2034 * There are some decisions to take:
2035 * 1) How to check DD RX descriptors bit
2036 * 2) How and when to allocate new mbufs
2038 * Current implementation checks just one single DD bit each loop. As each
2039 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
2040 * a single cache line instead. Tests with this change have not shown any
2041 * performance improvement but it requires further investigation. For example,
2042 * depending on which descriptor is next, the number of descriptors could be
2043 * less than 8 for just checking those in the same cache line. This implies
2044 * extra work which could be counterproductive by itself. Indeed, recent firmware
2045 * changes do exactly this: they write several descriptors with the DD bit set
2046 * at once, saving PCIe bandwidth and DMA operations from the NFP.
2048 * Mbuf allocation is done when a new packet is received. Then the descriptor
2049 * is automatically linked with the new mbuf and the old one is given to the
2050 * user. The main drawback with this design is that mbuf allocation is heavier
2051 * than using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From
2052 * the cache point of view, allocating the mbuf early (as we do now) does not
2053 * seem to bring any benefit at all. Again, tests with this change have not
2054 * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing,
2055 * so the implications of this type of allocation should be studied more carefully.
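/*
 * A bulk-refill variant (an illustrative sketch only, not what this driver
 * currently does) could grab a batch of mbufs in one call and refill several
 * freelist descriptors at once:
 *
 *	struct rte_mbuf *mbufs[64];
 *
 *	if (rte_mempool_get_bulk(rxq->mem_pool, (void **)mbufs, n) == 0) {
 *		for (i = 0; i < n; i++) {
 *			refill descriptor i with the IOVA of mbufs[i]
 *		}
 *	}
 *
 * keeping in mind that rte_mempool_get_bulk() is all-or-nothing.
 */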
2060 nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2062 struct nfp_net_rxq *rxq;
2063 struct nfp_net_rx_desc *rxds;
2064 struct nfp_net_rx_buff *rxb;
2065 struct nfp_net_hw *hw;
2066 struct rte_mbuf *mb;
2067 struct rte_mbuf *new_mb;
2073 if (unlikely(rxq == NULL)) {
2075 * DPDK only checks that the queue index is lower than the number of
2076 * enabled queues, but the queue still needs to have been configured.
2078 RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
2086 while (avail < nb_pkts) {
2087 rxb = &rxq->rxbufs[rxq->rd_p];
2088 if (unlikely(rxb == NULL)) {
2089 RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
2093 rxds = &rxq->rxds[rxq->rd_p];
2094 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
2098 * Memory barrier to ensure that we won't do other
2099 * reads before the DD bit.
2104 * We got a packet. Let's alloc a new mbuf for refilling the
2105 * free descriptor ring as soon as possible
2107 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
2108 if (unlikely(new_mb == NULL)) {
2109 RTE_LOG_DP(DEBUG, PMD,
2110 "RX mbuf alloc failed port_id=%u queue_id=%u\n",
2111 rxq->port_id, (unsigned int)rxq->qidx);
2112 nfp_net_mbuf_alloc_failed(rxq);
2119 * Grab the mbuf and refill the descriptor with the
2120 * previously allocated mbuf
2125 PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
2126 rxds->rxd.data_len, rxq->mbuf_size);
2128 /* Size of this segment */
2129 mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2130 /* Size of the whole packet. We just support 1 segment */
2131 mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2133 if (unlikely((mb->data_len + hw->rx_offset) >
2136 * This should not happen and the user has the
2137 * responsibility of avoiding it. But we have
2138 * to give some info about the error
2140 RTE_LOG_DP(ERR, PMD,
2141 "mbuf overflow likely due to the RX offset.\n"
2142 "\t\tYour mbuf size should have extra space for"
2143 " RX offset=%u bytes.\n"
2144 "\t\tCurrently you just have %u bytes available"
2145 " but the received packet is %u bytes long",
2147 rxq->mbuf_size - hw->rx_offset,
2152 /* Filling the received mbuf with packet info */
2154 mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
2156 mb->data_off = RTE_PKTMBUF_HEADROOM +
2157 NFP_DESC_META_LEN(rxds);
2159 /* No scatter mode supported */
2163 mb->port = rxq->port_id;
2165 /* Checking the RSS flag */
2166 nfp_net_set_hash(rxq, rxds, mb);
2168 /* Checking the checksum flag */
2169 nfp_net_rx_cksum(rxq, rxds, mb);
2171 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
2172 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
2173 mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
2174 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2177 /* Adding the mbuf to the mbuf array passed by the app */
2178 rx_pkts[avail++] = mb;
2180 /* Now resetting and updating the descriptor */
2183 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
2185 rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
2186 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
2189 if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping? */
2196 PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received",
2197 rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
2199 nb_hold += rxq->nb_rx_hold;
2202 * FL descriptors need to be written before incrementing the
2203 * FL queue WR pointer.
2206 if (nb_hold > rxq->rx_free_thresh) {
2207 PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
2208 rxq->port_id, (unsigned int)rxq->qidx,
2209 (unsigned)nb_hold, (unsigned)avail);
2210 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
2213 rxq->nb_rx_hold = nb_hold;
2219 * nfp_net_tx_free_bufs - Check for descriptors with a complete status
2221 * @txq: TX queue to work with
2222 * Returns number of descriptors freed
2225 nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
2230 PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
2231 " status", txq->qidx);
2233 /* Work out how many packets have been sent */
2234 qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
2236 if (qcp_rd_p == txq->rd_p) {
2237 PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
2238 "packets (%u, %u)", txq->qidx,
2239 qcp_rd_p, txq->rd_p);
2243 if (qcp_rd_p > txq->rd_p)
2244 todo = qcp_rd_p - txq->rd_p;
2246 todo = qcp_rd_p + txq->tx_count - txq->rd_p;
2248 PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u",
2249 qcp_rd_p, txq->rd_p);
2255 if (unlikely(txq->rd_p >= txq->tx_count))
2256 txq->rd_p -= txq->tx_count;
2261 /* Always leave some free descriptors to avoid wrap-around confusion */
2263 uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
2265 if (txq->wr_p >= txq->rd_p)
2266 return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
2268 return txq->rd_p - txq->wr_p - 8;
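/*
 * Example: with tx_count = 1024, wr_p = 500 and rd_p = 400 this reports
 * 1024 - (500 - 400) - 8 = 916 free descriptors; the 8 kept back are the
 * wrap-around guard mentioned above.
 */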
2272 * nfp_net_txq_full - Check if the number of free TX descriptors
2273 * is below tx_free_thresh
2275 * @txq: TX queue to check
2277 * This function uses the host copy of the read/write pointers
2280 uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
2282 return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
2286 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2288 struct nfp_net_txq *txq;
2289 struct nfp_net_hw *hw;
2290 struct nfp_net_tx_desc *txds, txd;
2291 struct rte_mbuf *pkt;
2293 int pkt_size, dma_size;
2294 uint16_t free_descs, issued_descs;
2295 struct rte_mbuf **lmbuf;
2300 txds = &txq->txds[txq->wr_p];
2302 PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
2303 txq->qidx, txq->wr_p, nb_pkts);
2305 if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
2306 nfp_net_tx_free_bufs(txq);
2308 free_descs = (uint16_t)nfp_free_tx_desc(txq);
2309 if (unlikely(free_descs == 0))
2316 PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
2317 txq->qidx, nb_pkts);
2318 /* Sending packets */
2319 while ((i < nb_pkts) && free_descs) {
2320 /* Grabbing the mbuf linked to the current descriptor */
2321 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2322 /* Warming the cache for releasing the mbuf later on */
2323 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
2325 pkt = *(tx_pkts + i);
2327 if (unlikely((pkt->nb_segs > 1) &&
2328 !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
2329 PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
2330 rte_panic("Multisegment packet unsupported\n");
2333 /* Checking if we have enough descriptors */
2334 if (unlikely(pkt->nb_segs > free_descs))
2338 * Checksum and VLAN flags are set only in the first descriptor of a
2339 * multisegment packet, but TSO info needs to be in all of them.
2341 txd.data_len = pkt->pkt_len;
2342 nfp_net_tx_tso(txq, &txd, pkt);
2343 nfp_net_tx_cksum(txq, &txd, pkt);
2345 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
2346 (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
2347 txd.flags |= PCIE_DESC_TX_VLAN;
2348 txd.vlan = pkt->vlan_tci;
2352 * mbuf data_len is the length of data in one segment, while pkt_len
2353 * is the length of the whole packet. When the packet has just one
2354 * segment, data_len == pkt_len.
2356 pkt_size = pkt->pkt_len;
2359 /* Copying TSO, VLAN and cksum info */
2362 /* Releasing mbuf used by this descriptor previously */
2364 rte_pktmbuf_free_seg(*lmbuf);
2367 * Linking mbuf with descriptor for being released
2368 * next time descriptor is used
2372 dma_size = pkt->data_len;
2373 dma_addr = rte_mbuf_data_iova(pkt);
2374 PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
2375 "%" PRIx64 "", dma_addr);
2377 /* Filling descriptors fields */
2378 txds->dma_len = dma_size;
2379 txds->data_len = txd.data_len;
2380 txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
2381 txds->dma_addr_lo = (dma_addr & 0xffffffff);
2382 ASSERT(free_descs > 0);
2386 if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping? */
2389 pkt_size -= dma_size;
2392 * Making the EOP, packets with just one segment
2395 if (likely(!pkt_size))
2396 txds->offset_eop = PCIE_DESC_TX_EOP;
2398 txds->offset_eop = 0;
2401 /* Referencing next free TX descriptor */
2402 txds = &txq->txds[txq->wr_p];
2403 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2410 /* Increment write pointers. Force memory write before we let HW know */
2412 nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
2418 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2420 uint32_t new_ctrl, update;
2421 struct nfp_net_hw *hw;
2424 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2427 /* Enable vlan strip if it is not configured yet */
2428 if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
2429 !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2430 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
2432 /* Disable vlan strip only if it is currently configured */
2433 if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
2434 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2435 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
2440 update = NFP_NET_CFG_UPDATE_GEN;
2442 ret = nfp_net_reconfig(hw, new_ctrl, update);
2444 hw->ctrl = new_ctrl;
2450 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
2451 struct rte_eth_rss_reta_entry64 *reta_conf,
2454 uint32_t reta, mask;
2457 struct nfp_net_hw *hw =
2458 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2460 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2461 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2462 "(%d) does not match the number supported by the hardware "
2463 "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2468 * Update Redirection Table. There are 128 8-bit entries which can be
2469 * managed as 32 32-bit entries.
2470 */
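/*
 * Editorial worked example (assuming RTE_RETA_GROUP_SIZE == 64): for i = 68,
 * idx = 68 / 64 = 1 and shift = 68 % 64 = 4, so the 4-bit mask is taken from
 * bits 4..7 of reta_conf[1].mask and reta_conf[1].reta[4..7] provide the
 * values for indirection table entries 68..71.
 */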
2471 for (i = 0; i < reta_size; i += 4) {
2472 /* Handling 4 RSS entries per loop */
2473 idx = i / RTE_RETA_GROUP_SIZE;
2474 shift = i % RTE_RETA_GROUP_SIZE;
2475 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2481 /* If all 4 entries were set, there is no need to read the RETA register */
2483 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
2485 for (j = 0; j < 4; j++) {
2486 if (!(mask & (0x1 << j)))
2489 /* Clearing the entry bits */
2490 reta &= ~(0xFF << (8 * j));
2491 reta |= reta_conf[idx].reta[shift + j] << (8 * j);
2493 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
2499 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
2501 nfp_net_reta_update(struct rte_eth_dev *dev,
2502 struct rte_eth_rss_reta_entry64 *reta_conf,
2505 struct nfp_net_hw *hw =
2506 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2510 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2513 ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
2517 update = NFP_NET_CFG_UPDATE_RSS;
2519 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2525 /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
2527 nfp_net_reta_query(struct rte_eth_dev *dev,
2528 struct rte_eth_rss_reta_entry64 *reta_conf,
2534 struct nfp_net_hw *hw;
2536 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2538 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2541 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2542 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2543 "(%d) does not match the number supported by the hardware "
2544 "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2549 * Reading Redirection Table. There are 128 8-bit entries which can be
2550 * managed as 32 32-bit entries.
2552 for (i = 0; i < reta_size; i += 4) {
2553 /* Handling 4 RSS entries per loop */
2554 idx = i / RTE_RETA_GROUP_SIZE;
2555 shift = i % RTE_RETA_GROUP_SIZE;
2556 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2561 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
2563 for (j = 0; j < 4; j++) {
2564 if (!(mask & (0x1 << j)))
2566 reta_conf[idx].reta[shift + j] =
2567 (uint8_t)((reta >> (8 * j)) & 0xFF);
2574 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
2575 struct rte_eth_rss_conf *rss_conf)
2577 struct nfp_net_hw *hw;
2579 uint32_t cfg_rss_ctrl = 0;
2583 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2585 /* Writing the key byte by byte */
2586 for (i = 0; i < rss_conf->rss_key_len; i++) {
2587 memcpy(&key, &rss_conf->rss_key[i], 1);
2588 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
2591 rss_hf = rss_conf->rss_hf;
2593 if (rss_hf & ETH_RSS_IPV4)
2594 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
2596 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2597 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
2599 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2600 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
2602 if (rss_hf & ETH_RSS_IPV6)
2603 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
2605 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2606 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
2608 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2609 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
2611 cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
2612 cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
2614 /* configuring where to apply the RSS hash */
2615 nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
2617 /* Writing the key size */
2618 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
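/*
 * Editorial sketch (not part of the driver): one way an application could
 * exercise this path through the generic ethdev API. "port_id" and the
 * 40-byte key length are illustrative assumptions.
 *
 *	static uint8_t rss_key[40];	(filled with the application's key)
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = rss_key,
 *		.rss_key_len = sizeof(rss_key),
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
 *			  ETH_RSS_NONFRAG_IPV4_UDP,
 *	};
 *	if (rte_eth_dev_rss_hash_update(port_id, &conf) != 0)
 *		rte_exit(EXIT_FAILURE, "RSS config failed\n");
 *
 * rte_eth_dev_rss_hash_update() ends up in nfp_net_rss_hash_update(), which
 * checks the key length against NFP_NET_CFG_RSS_KEY_SZ and then calls
 * nfp_net_rss_hash_write() above.
 */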
2624 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
2625 struct rte_eth_rss_conf *rss_conf)
2629 struct nfp_net_hw *hw;
2631 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2633 rss_hf = rss_conf->rss_hf;
2635 /* Checking if RSS is enabled */
2636 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
2637 if (rss_hf != 0) { /* Enable RSS? */
2638 PMD_DRV_LOG(ERR, "RSS unsupported");
2641 return 0; /* Nothing to do */
2644 if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
2645 PMD_DRV_LOG(ERR, "hash key too long");
2649 nfp_net_rss_hash_write(dev, rss_conf);
2651 update = NFP_NET_CFG_UPDATE_RSS;
2653 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2660 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2661 struct rte_eth_rss_conf *rss_conf)
2664 uint32_t cfg_rss_ctrl;
2667 struct nfp_net_hw *hw;
2669 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2671 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2674 rss_hf = rss_conf->rss_hf;
2675 cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2677 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
2678 rss_hf |= ETH_RSS_IPV4;
2680 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
2681 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2683 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
2684 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2686 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
2687 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2689 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
2690 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2692 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
2693 rss_hf |= ETH_RSS_IPV6;
2695 /* Propagate current RSS hash functions to caller */
2696 rss_conf->rss_hf = rss_hf;
2698 /* Reading the key size */
2699 rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2701 /* Reading the key byte by byte */
2702 for (i = 0; i < rss_conf->rss_key_len; i++) {
2703 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2704 memcpy(&rss_conf->rss_key[i], &key, 1);
2711 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2713 struct rte_eth_conf *dev_conf;
2714 struct rte_eth_rss_conf rss_conf;
2715 struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2716 uint16_t rx_queues = dev->data->nb_rx_queues;
2720 PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
2723 nfp_reta_conf[0].mask = ~0x0;
2724 nfp_reta_conf[1].mask = ~0x0;
2727 for (i = 0; i < 0x40; i += 8) {
2728 for (j = i; j < (i + 8); j++) {
2729 nfp_reta_conf[0].reta[j] = queue;
2730 nfp_reta_conf[1].reta[j] = queue++;
2734 ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
2738 dev_conf = &dev->data->dev_conf;
2740 PMD_DRV_LOG(INFO, "wrong rss conf");
2743 rss_conf = dev_conf->rx_adv_conf.rss_conf;
2745 ret = nfp_net_rss_hash_write(dev, &rss_conf);
2751 /* Initialise and register driver with DPDK Application */
2752 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
2753 .dev_configure = nfp_net_configure,
2754 .dev_start = nfp_net_start,
2755 .dev_stop = nfp_net_stop,
2756 .dev_set_link_up = nfp_net_set_link_up,
2757 .dev_set_link_down = nfp_net_set_link_down,
2758 .dev_close = nfp_net_close,
2759 .promiscuous_enable = nfp_net_promisc_enable,
2760 .promiscuous_disable = nfp_net_promisc_disable,
2761 .link_update = nfp_net_link_update,
2762 .stats_get = nfp_net_stats_get,
2763 .stats_reset = nfp_net_stats_reset,
2764 .dev_infos_get = nfp_net_infos_get,
2765 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
2766 .mtu_set = nfp_net_dev_mtu_set,
2767 .mac_addr_set = nfp_set_mac_addr,
2768 .vlan_offload_set = nfp_net_vlan_offload_set,
2769 .reta_update = nfp_net_reta_update,
2770 .reta_query = nfp_net_reta_query,
2771 .rss_hash_update = nfp_net_rss_hash_update,
2772 .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
2773 .rx_queue_setup = nfp_net_rx_queue_setup,
2774 .rx_queue_release = nfp_net_rx_queue_release,
2775 .tx_queue_setup = nfp_net_tx_queue_setup,
2776 .tx_queue_release = nfp_net_tx_queue_release,
2777 .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
2778 .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
2783 nfp_net_init(struct rte_eth_dev *eth_dev)
2785 struct rte_pci_device *pci_dev;
2786 struct nfp_pf_dev *pf_dev;
2787 struct nfp_net_hw *hw;
2789 uint64_t tx_bar_off = 0, rx_bar_off = 0;
2795 PMD_INIT_FUNC_TRACE();
2797 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2799 /* Use backpointer here to the PF of this eth_dev */
2800 pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);
2802 /* NFP can not handle DMA addresses requiring more than 40 bits */
2803 if (rte_mem_check_dma_mask(40)) {
2804 RTE_LOG(ERR, PMD, "device %s can not be used:",
2805 pci_dev->device.name);
2806 RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
2810 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
2811 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
2812 port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
2813 if (port < 0 || port > 7) {
2814 PMD_DRV_LOG(ERR, "Port value is wrong");
2818 /* Use PF array of physical ports to get pointer to
2819 * this specific port
2821 hw = pf_dev->ports[port];
2823 PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
2824 "NFP internal port number: %d",
2828 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2831 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
2832 eth_dev->rx_queue_count = nfp_net_rx_queue_count;
2833 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
2834 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
2836 /* For secondary processes, the primary has done all the work */
2837 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2840 rte_eth_copy_pci_info(eth_dev, pci_dev);
2842 hw->device_id = pci_dev->id.device_id;
2843 hw->vendor_id = pci_dev->id.vendor_id;
2844 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2845 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2847 PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
2848 pci_dev->id.vendor_id, pci_dev->id.device_id,
2849 pci_dev->addr.domain, pci_dev->addr.bus,
2850 pci_dev->addr.devid, pci_dev->addr.function);
2852 hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
2853 if (hw->ctrl_bar == NULL) {
2855 "hw->ctrl_bar is NULL. BAR0 not configured");
2859 if (hw->is_phyport) {
2861 hw->ctrl_bar = pf_dev->ctrl_bar;
2863 if (!pf_dev->ctrl_bar)
2865 /* Use port offset in pf ctrl_bar for this
2868 hw->ctrl_bar = pf_dev->ctrl_bar +
2869 (port * NFP_PF_CSR_SLICE_SIZE);
2873 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
2875 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
2876 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
2878 /* Work out where in the BAR the queues start. */
2879 switch (pci_dev->id.device_id) {
2880 case PCI_DEVICE_ID_NFP4000_PF_NIC:
2881 case PCI_DEVICE_ID_NFP6000_PF_NIC:
2882 case PCI_DEVICE_ID_NFP6000_VF_NIC:
2883 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
2884 tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
2885 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
2886 rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
2889 PMD_DRV_LOG(ERR, "nfp_net: no matching device ID");
2891 goto dev_err_ctrl_map;
2894 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
2895 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
2897 if (hw->is_phyport) {
2898 hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
2899 hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
2900 eth_dev->data->dev_private = hw;
2902 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2904 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2908 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
2909 hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
2911 nfp_net_cfg_queue_setup(hw);
2913 /* Get some of the read-only fields from the config BAR */
2914 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2915 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
2916 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
2917 hw->mtu = RTE_ETHER_MTU;
2919 /* VLAN insertion is incompatible with LSOv2 */
2920 if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
2921 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
2923 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
2924 hw->rx_offset = NFP_NET_RX_OFFSET;
2926 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
2928 PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
2929 NFD_CFG_MAJOR_VERSION_of(hw->ver),
2930 NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
2932 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
2933 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2934 hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
2935 hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
2936 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
2937 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
2938 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
2939 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
2940 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2941 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
2942 hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
2943 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
2944 hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
2945 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
2946 hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");
2950 hw->stride_rx = stride;
2951 hw->stride_tx = stride;
2953 PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
2954 hw->max_rx_queues, hw->max_tx_queues);
2956 /* Initializing spinlock for reconfigs */
2957 rte_spinlock_init(&hw->reconfig_lock);
2959 /* Allocating memory for mac addr */
2960 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
2961 RTE_ETHER_ADDR_LEN, 0);
2962 if (eth_dev->data->mac_addrs == NULL) {
2963 PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
2965 goto dev_err_queues_map;
2968 if (hw->is_phyport) {
2969 nfp_net_pf_read_mac(pf_dev, port);
2970 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2972 nfp_net_vf_read_mac(hw);
2975 if (!rte_is_valid_assigned_ether_addr(
2976 (struct rte_ether_addr *)&hw->mac_addr)) {
2977 PMD_INIT_LOG(INFO, "Using random mac address for port %d",
2979 /* Using random mac addresses for VFs */
2980 rte_eth_random_addr(&hw->mac_addr[0]);
2981 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2984 /* Copying mac address to DPDK eth_dev struct */
2985 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
2986 &eth_dev->data->mac_addrs[0]);
2988 if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
2989 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
2991 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2993 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
2994 "mac=%02x:%02x:%02x:%02x:%02x:%02x",
2995 eth_dev->data->port_id, pci_dev->id.vendor_id,
2996 pci_dev->id.device_id,
2997 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
2998 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
3000 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3001 /* Registering LSC interrupt handler */
3002 rte_intr_callback_register(&pci_dev->intr_handle,
3003 nfp_net_dev_interrupt_handler,
3005 /* Telling the firmware about the LSC interrupt entry */
3006 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
3007 /* Recording current stats counters values */
3008 nfp_net_stats_reset(eth_dev);
3014 nfp_cpp_area_free(hw->hwqueues_area);
3016 nfp_cpp_area_free(hw->ctrl_area);
3021 #define NFP_CPP_MEMIO_BOUNDARY (1 << 20)
3024 * Serving a write request to NFP from host programs. The request
3025 * carries the write size and the CPP target. The bridge makes use
3026 * of the CPP interface handler configured during PMD setup.
3029 nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
3031 struct nfp_cpp_area *area;
3032 off_t offset, nfp_offset;
3033 uint32_t cpp_id, pos, len;
3034 uint32_t tmpbuf[16];
3035 size_t count, curlen, totlen = 0;
3038 PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
3039 sizeof(off_t), sizeof(size_t));
3041 /* Reading the count param */
3042 err = recv(sockfd, &count, sizeof(off_t), 0);
3043 if (err != sizeof(off_t))
3048 /* Reading the offset param */
3049 err = recv(sockfd, &offset, sizeof(off_t), 0);
3050 if (err != sizeof(off_t))
3053 /* Obtain target's CPP ID and offset in target */
3054 cpp_id = (offset >> 40) << 8;
3055 nfp_offset = offset & ((1ull << 40) - 1);
3057 PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
3059 PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
3060 cpp_id, nfp_offset);
3062 /* Adjust the length if the access crosses a CPP memory I/O boundary */
3063 if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
3064 (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
3065 curlen = NFP_CPP_MEMIO_BOUNDARY -
3066 (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
3070 /* configure a CPP PCIe2CPP BAR for mapping the CPP target */
3071 area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
3072 nfp_offset, curlen);
3074 RTE_LOG(ERR, PMD, "%s: area alloc fail\n", __func__);
3078 /* mapping the target */
3079 err = nfp_cpp_area_acquire(area);
3081 RTE_LOG(ERR, PMD, "area acquire failed\n");
3082 nfp_cpp_area_free(area);
3086 for (pos = 0; pos < curlen; pos += len) {
3088 if (len > sizeof(tmpbuf))
3089 len = sizeof(tmpbuf);
3091 PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu\n", __func__,
3093 err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
3094 if (err != (int)len) {
3096 "%s: error when receiving, %d of %zu\n",
3097 __func__, err, count);
3098 nfp_cpp_area_release(area);
3099 nfp_cpp_area_free(area);
3102 err = nfp_cpp_area_write(area, pos, tmpbuf, len);
3104 RTE_LOG(ERR, PMD, "nfp_cpp_area_write error\n");
3105 nfp_cpp_area_release(area);
3106 nfp_cpp_area_free(area);
3113 nfp_cpp_area_release(area);
3114 nfp_cpp_area_free(area);
3117 curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
3118 NFP_CPP_MEMIO_BOUNDARY : count;
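/*
 * Editorial sketch (not part of the driver): how a hypothetical host-side
 * tool could frame a write request for the bridge above. The socket path
 * mirrors the service code below; cpp_id, nfp_offset, buf and len are
 * placeholders.
 *
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *	struct sockaddr addr = { .sa_family = AF_UNIX };
 *	strcpy(addr.sa_data, "/tmp/nfp_cpp");
 *	connect(fd, &addr, sizeof(addr));
 *
 *	uint32_t op = NFP_BRIDGE_OP_WRITE;
 *	size_t count = len;
 *	off_t offset = ((off_t)(cpp_id >> 8) << 40) | nfp_offset;
 *
 *	send(fd, &op, 4, 0);
 *	send(fd, &count, sizeof(off_t), 0);
 *	send(fd, &offset, sizeof(off_t), 0);
 *	send(fd, buf, len, 0);
 *
 * The encoding of "offset" mirrors the decode above: the CPP ID (shifted
 * right by 8) lives in the bits above bit 40 and the 40-bit NFP offset in
 * the low bits.
 */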
3125 * Serving a read request to NFP from host programs. The request
3126 * carries the read size and the CPP target. The bridge makes use
3127 * of the CPP interface handler configured during PMD setup. The read
3128 * data is sent back to the requester using the same socket.
3131 nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
3133 struct nfp_cpp_area *area;
3134 off_t offset, nfp_offset;
3135 uint32_t cpp_id, pos, len;
3136 uint32_t tmpbuf[16];
3137 size_t count, curlen, totlen = 0;
3140 PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
3141 sizeof(off_t), sizeof(size_t));
3143 /* Reading the count param */
3144 err = recv(sockfd, &count, sizeof(off_t), 0);
3145 if (err != sizeof(off_t))
3150 /* Reading the offset param */
3151 err = recv(sockfd, &offset, sizeof(off_t), 0);
3152 if (err != sizeof(off_t))
3155 /* Obtain target's CPP ID and offset in target */
3156 cpp_id = (offset >> 40) << 8;
3157 nfp_offset = offset & ((1ull << 40) - 1);
3159 PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
3161 PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
3162 cpp_id, nfp_offset);
3164 /* Adjust the length if the access crosses a CPP memory I/O boundary */
3165 if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
3166 (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
3167 curlen = NFP_CPP_MEMIO_BOUNDARY -
3168 (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
3172 area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
3173 nfp_offset, curlen);
3175 RTE_LOG(ERR, PMD, "%s: area alloc failed\n", __func__);
3179 err = nfp_cpp_area_acquire(area);
3181 RTE_LOG(ERR, PMD, "area acquire failed\n");
3182 nfp_cpp_area_free(area);
3186 for (pos = 0; pos < curlen; pos += len) {
3188 if (len > sizeof(tmpbuf))
3189 len = sizeof(tmpbuf);
3191 err = nfp_cpp_area_read(area, pos, tmpbuf, len);
3193 RTE_LOG(ERR, PMD, "nfp_cpp_area_read error\n");
3194 nfp_cpp_area_release(area);
3195 nfp_cpp_area_free(area);
3198 PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu\n", __func__,
3201 err = send(sockfd, tmpbuf, len, 0);
3202 if (err != (int)len) {
3204 "%s: error when sending: %d of %zu\n",
3205 __func__, err, count);
3206 nfp_cpp_area_release(area);
3207 nfp_cpp_area_free(area);
3214 nfp_cpp_area_release(area);
3215 nfp_cpp_area_free(area);
3218 curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
3219 NFP_CPP_MEMIO_BOUNDARY : count;
3224 #define NFP_IOCTL 'n'
3225 #define NFP_IOCTL_CPP_IDENTIFICATION _IOW(NFP_IOCTL, 0x8f, uint32_t)
3227 * Serving an ioctl command from host NFP tools. This usually goes to
3228 * a kernel char driver, but that driver is not available when the PF is
3229 * bound to the PMD. Currently just one ioctl command is served and it
3230 * does not require any CPP access at all.
3231 */
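/*
 * Editorial sketch (not part of the driver): the byte-level exchange a host
 * tool would perform for this ioctl, mirroring the recv()/send() calls
 * below. "fd" is assumed to be a connected socket to /tmp/nfp_cpp; the
 * identification size sent by the client is read but not otherwise used.
 *
 *	uint32_t op = NFP_BRIDGE_OP_IOCTL;
 *	uint32_t cmd = NFP_IOCTL_CPP_IDENTIFICATION;
 *	uint32_t size = 8, model, interface;
 *
 *	send(fd, &op, 4, 0);
 *	send(fd, &cmd, 4, 0);
 *	send(fd, &size, 4, 0);
 *	recv(fd, &model, 4, 0);      (response: NFP model)
 *	recv(fd, &interface, 4, 0);  (response: CPP interface id)
 */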
3233 nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp)
3235 uint32_t cmd, ident_size, tmp;
3238 /* Reading the IOCTL command */
3239 err = recv(sockfd, &cmd, 4, 0);
3241 RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
3245 /* Only supporting NFP_IOCTL_CPP_IDENTIFICATION */
3246 if (cmd != NFP_IOCTL_CPP_IDENTIFICATION) {
3247 RTE_LOG(ERR, PMD, "%s: unknown cmd %d\n", __func__, cmd);
3251 err = recv(sockfd, &ident_size, 4, 0);
3253 RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
3257 tmp = nfp_cpp_model(cpp);
3259 PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x\n", __func__, tmp);
3261 err = send(sockfd, &tmp, 4, 0);
3263 RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
3267 tmp = cpp->interface;
3269 PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x\n", __func__, tmp);
3271 err = send(sockfd, &tmp, 4, 0);
3273 RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
3280 #define NFP_BRIDGE_OP_READ 20
3281 #define NFP_BRIDGE_OP_WRITE 30
3282 #define NFP_BRIDGE_OP_IOCTL 40
3285 * This is the code to be executed by a service core. The CPP bridge
3286 * interface is based on a unix socket; the requests usually served by a
3287 * kernel char driver (read, write and ioctl) are handled by the CPP
3288 * bridge instead. NFP host tools can be run through a wrapper library
3289 * (e.g. via LD_LIBRARY_PATH) and remain completely unaware that the CPP
3290 * bridge is standing in for the NFP kernel char driver for CPP accesses.
3291 */
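/*
 * Editorial sketch (not part of the driver): a minimal way an application
 * could place the registered "nfp_cpp_service" on a service lcore, assuming
 * a spare lcore id in "slcore". The exact policy (which lcore, when to start
 * it) is up to the application.
 *
 *	uint32_t id;
 *
 *	if (rte_service_get_by_name("nfp_cpp_service", &id) == 0) {
 *		rte_service_lcore_add(slcore);
 *		rte_service_map_lcore_set(id, slcore, 1);
 *		rte_service_component_runstate_set(id, 1);
 *		rte_service_runstate_set(id, 1);
 *		rte_service_lcore_start(slcore);
 *	}
 */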
3293 nfp_cpp_bridge_service_func(void *args)
3295 struct sockaddr address;
3296 struct nfp_cpp *cpp = args;
3297 int sockfd, datafd, op, ret;
3299 unlink("/tmp/nfp_cpp");
3300 sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
3302 RTE_LOG(ERR, PMD, "%s: socket creation error. Service failed\n",
3307 memset(&address, 0, sizeof(struct sockaddr));
3309 address.sa_family = AF_UNIX;
3310 strcpy(address.sa_data, "/tmp/nfp_cpp");
3312 ret = bind(sockfd, (const struct sockaddr *)&address,
3313 sizeof(struct sockaddr));
3315 RTE_LOG(ERR, PMD, "%s: bind error (%d). Service failed\n",
3321 ret = listen(sockfd, 20);
3323 RTE_LOG(ERR, PMD, "%s: listen error(%d). Service failed\n",
3330 datafd = accept(sockfd, NULL, NULL);
3332 RTE_LOG(ERR, PMD, "%s: accept call error (%d)\n",
3334 RTE_LOG(ERR, PMD, "%s: service failed\n", __func__);
3340 ret = recv(datafd, &op, 4, 0);
3342 PMD_CPP_LOG(DEBUG, "%s: socket close\n",
3347 PMD_CPP_LOG(DEBUG, "%s: getting op %u\n", __func__, op);
3349 if (op == NFP_BRIDGE_OP_READ)
3350 nfp_cpp_bridge_serve_read(datafd, cpp);
3352 if (op == NFP_BRIDGE_OP_WRITE)
3353 nfp_cpp_bridge_serve_write(datafd, cpp);
3355 if (op == NFP_BRIDGE_OP_IOCTL)
3356 nfp_cpp_bridge_serve_ioctl(datafd, cpp);
3368 #define DEFAULT_FW_PATH "/lib/firmware/netronome"
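/*
 * Editorial example of the lookup order implemented below (all values are
 * illustrative): the driver first tries a serial-specific image, then a
 * PCI-address-specific one, and finally the generic image for the card type
 * built by nfp_fw_setup(), e.g.
 *
 *	/lib/firmware/netronome/serial-00-15-4d-13-51-0c-10-ff.nffw
 *	/lib/firmware/netronome/pci-0000:04:00.0.nffw
 *	/lib/firmware/netronome/nic_AMDA0099-0001_2x25.nffw
 */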
3371 nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
3373 struct nfp_cpp *cpp = nsp->cpp;
3379 /* Looking for firmware file in order of priority */
3381 /* First try to find a firmware image specific for this device */
3382 snprintf(serial, sizeof(serial),
3383 "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
3384 cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
3385 cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
3386 cpp->interface & 0xff);
3388 snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
3390 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3391 if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
3394 /* Then try the PCI name */
3395 snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
3397 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3398 if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
3401 /* Finally try the card type and media */
3402 snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
3403 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3404 if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
3405 PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
3410 PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
3413 PMD_DRV_LOG(INFO, "Uploading the firmware ...");
3414 nfp_nsp_load_fw(nsp, fw_buf, fsize);
3415 PMD_DRV_LOG(INFO, "Done");
3422 nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
3423 struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
3425 struct nfp_nsp *nsp;
3426 const char *nfp_fw_model;
3427 char card_desc[100];
3430 nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
3433 PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
3435 PMD_DRV_LOG(ERR, "firmware model NOT found");
3439 if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
3440 PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong number of ports: %u",
3441 nfp_eth_table->count);
3445 PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
3446 nfp_eth_table->count);
3448 PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
3450 snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
3451 nfp_fw_model, nfp_eth_table->count,
3452 nfp_eth_table->ports[0].speed / 1000);
3454 nsp = nfp_nsp_open(cpp);
3456 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
3460 nfp_nsp_device_soft_reset(nsp);
3461 err = nfp_fw_upload(dev, nsp, card_desc);
3467 static int nfp_init_phyports(struct nfp_pf_dev *pf_dev)
3469 struct nfp_net_hw *hw;
3470 struct rte_eth_dev *eth_dev;
3471 struct nfp_eth_table *nfp_eth_table = NULL;
3475 nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
3476 if (!nfp_eth_table) {
3477 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
3482 /* Loop through all physical ports on PF */
3483 for (i = 0; i < pf_dev->total_phyports; i++) {
3484 const unsigned int numa_node = rte_socket_id();
3485 char port_name[RTE_ETH_NAME_MAX_LEN];
3487 snprintf(port_name, sizeof(port_name), "%s_port%d",
3488 pf_dev->pci_dev->device.name, i);
3490 /* Allocate an eth_dev for this phyport */
3491 eth_dev = rte_eth_dev_allocate(port_name);
3497 /* Allocate memory for this phyport */
3498 eth_dev->data->dev_private =
3499 rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
3500 RTE_CACHE_LINE_SIZE, numa_node);
3501 if (!eth_dev->data->dev_private) {
3503 rte_eth_dev_release_port(eth_dev);
3507 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3509 /* Add this device to the PF's array of physical ports */
3510 pf_dev->ports[i] = hw;
3512 hw->pf_dev = pf_dev;
3513 hw->cpp = pf_dev->cpp;
3514 hw->eth_dev = eth_dev;
3516 hw->nfp_idx = nfp_eth_table->ports[i].index;
3517 hw->is_phyport = true;
3519 eth_dev->device = &pf_dev->pci_dev->device;
3521 /* ctrl/tx/rx BAR mappings and the remaining init happen in
3524 ret = nfp_net_init(eth_dev);
3531 rte_eth_dev_probing_finish(eth_dev);
3533 } /* End loop, all ports on this PF */
3535 goto eth_table_cleanup;
3538 for (i = 0; i < pf_dev->total_phyports; i++) {
3539 if (pf_dev->ports[i] && pf_dev->ports[i]->eth_dev) {
3540 struct rte_eth_dev *tmp_dev;
3541 tmp_dev = pf_dev->ports[i]->eth_dev;
3542 rte_eth_dev_release_port(tmp_dev);
3543 pf_dev->ports[i] = NULL;
3547 free(nfp_eth_table);
3552 static void nfp_register_cpp_service(struct nfp_cpp *cpp)
3554 uint32_t *cpp_service_id = NULL;
3555 struct rte_service_spec service;
3557 memset(&service, 0, sizeof(struct rte_service_spec));
3558 snprintf(service.name, sizeof(service.name), "nfp_cpp_service");
3559 service.callback = nfp_cpp_bridge_service_func;
3560 service.callback_userdata = (void *)cpp;
3562 if (rte_service_component_register(&service,
3564 RTE_LOG(WARNING, PMD, "NFP CPP bridge service register() failed");
3566 RTE_LOG(DEBUG, PMD, "NFP CPP bridge service registered");
3569 static int nfp_pf_init(struct rte_pci_device *pci_dev)
3571 struct nfp_pf_dev *pf_dev = NULL;
3572 struct nfp_cpp *cpp;
3573 struct nfp_hwinfo *hwinfo;
3574 struct nfp_rtsym_table *sym_tbl;
3575 struct nfp_eth_table *nfp_eth_table = NULL;
3576 char name[RTE_ETH_NAME_MAX_LEN];
3585 * When the device is bound to UIO it could, by mistake, be used
3586 * by two DPDK apps, and the UIO driver does not prevent it. This
3587 * could lead to a serious problem when configuring the NFP CPP
3588 * interface. Here we avoid this by telling the CPP init code to
3589 * use a lock file if UIO is being used.
3591 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
3592 cpp = nfp_cpp_from_device_name(pci_dev, 0);
3594 cpp = nfp_cpp_from_device_name(pci_dev, 1);
3597 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
3602 hwinfo = nfp_hwinfo_read(cpp);
3604 PMD_INIT_LOG(ERR, "Error reading hwinfo table");
3609 nfp_eth_table = nfp_eth_read_ports(cpp);
3610 if (!nfp_eth_table) {
3611 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
3613 goto hwinfo_cleanup;
3616 if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
3617 PMD_INIT_LOG(ERR, "Error when uploading firmware");
3619 goto eth_table_cleanup;
3622 /* Now the symbol table should be there */
3623 sym_tbl = nfp_rtsym_table_read(cpp);
3625 PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
3628 goto eth_table_cleanup;
3631 total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
3632 if (total_ports != (int)nfp_eth_table->count) {
3633 PMD_DRV_LOG(ERR, "Inconsistent number of ports");
3635 goto sym_tbl_cleanup;
3638 PMD_INIT_LOG(INFO, "Total physical ports: %d", total_ports);
3640 if (total_ports <= 0 || total_ports > 8) {
3641 PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
3643 goto sym_tbl_cleanup;
3645 /* Allocate memory for the PF "device" */
3646 snprintf(name, sizeof(name), "nfp_pf%d", 0);
3647 pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
3650 goto sym_tbl_cleanup;
3653 /* Populate the newly created PF device */
3655 pf_dev->hwinfo = hwinfo;
3656 pf_dev->sym_tbl = sym_tbl;
3657 pf_dev->total_phyports = total_ports;
3659 if (total_ports > 1)
3660 pf_dev->multiport = true;
3662 pf_dev->pci_dev = pci_dev;
3664 /* Map the symbol table */
3665 pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
3666 pf_dev->total_phyports * 32768,
3667 &pf_dev->ctrl_area);
3668 if (!pf_dev->ctrl_bar) {
3669 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_bar0");
3674 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);
3676 /* configure access to tx/rx vNIC BARs */
3677 pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
3679 NFP_QCP_QUEUE_AREA_SZ,
3680 &pf_dev->hwqueues_area);
3681 if (!pf_dev->hw_queues) {
3682 PMD_INIT_LOG(ERR, "nfp_cpp_map_area fails for net.qc");
3684 goto ctrl_area_cleanup;
3687 PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);
3689 /* Initialize and prep physical ports now
3690 * This will loop through all physical ports
3692 ret = nfp_init_phyports(pf_dev);
3694 PMD_INIT_LOG(ERR, "Could not create physical ports");
3695 goto hwqueues_cleanup;
3698 /* register the CPP bridge service here for primary use */
3699 nfp_register_cpp_service(pf_dev->cpp);
3704 nfp_cpp_area_free(pf_dev->hwqueues_area);
3706 nfp_cpp_area_free(pf_dev->ctrl_area);
3712 free(nfp_eth_table);
3720 * When attaching to the NFP4000/6000 PF on a secondary process there
3721 * is no need to initialize the PF again. Only minimal work is required
3724 static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
3726 struct nfp_cpp *cpp;
3727 struct nfp_rtsym_table *sym_tbl;
3736 * When the device is bound to UIO it could, by mistake, be used
3737 * by two DPDK apps, and the UIO driver does not prevent it. This
3738 * could lead to a serious problem when configuring the NFP CPP
3739 * interface. Here we avoid this by telling the CPP init code to
3740 * use a lock file if UIO is being used.
3742 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
3743 cpp = nfp_cpp_from_device_name(pci_dev, 0);
3745 cpp = nfp_cpp_from_device_name(pci_dev, 1);
3748 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
3753 * We don't have access to the PF created in the primary process
3754 * here so we have to read the number of ports from firmware
3756 sym_tbl = nfp_rtsym_table_read(cpp);
3758 PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
3763 total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
3765 for (i = 0; i < total_ports; i++) {
3766 struct rte_eth_dev *eth_dev;
3767 char port_name[RTE_ETH_NAME_MAX_LEN];
3769 snprintf(port_name, sizeof(port_name), "%s_port%d",
3770 pci_dev->device.name, i);
3772 PMD_DRV_LOG(DEBUG, "Secondary attaching to port %s",
3774 eth_dev = rte_eth_dev_attach_secondary(port_name);
3777 "secondary process attach failed, "
3778 "ethdev doesn't exist");
3781 eth_dev->process_private = cpp;
3782 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
3783 eth_dev->rx_queue_count = nfp_net_rx_queue_count;
3784 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
3785 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
3786 rte_eth_dev_probing_finish(eth_dev);
3789 /* Register the CPP bridge service for the secondary too */
3790 nfp_register_cpp_service(cpp);
3795 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3796 struct rte_pci_device *dev)
3798 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3799 return nfp_pf_init(dev);
3801 return nfp_pf_secondary_init(dev);
3804 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
3806 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3807 PCI_DEVICE_ID_NFP4000_PF_NIC)
3810 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3811 PCI_DEVICE_ID_NFP6000_PF_NIC)
3818 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
3820 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3821 PCI_DEVICE_ID_NFP6000_VF_NIC)
3828 static int nfp_pci_uninit(struct rte_eth_dev *eth_dev)
3830 struct rte_pci_device *pci_dev;
3833 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3835 if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC ||
3836 pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) {
3837 /* Free up all physical ports under PF */
3838 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
3839 rte_eth_dev_close(port_id);
3841 * Ports can be closed and freed but hotplugging is not
3842 * currently supported
3847 /* VF cleanup, just free private port data */
3848 return nfp_net_close(eth_dev);
3851 static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3852 struct rte_pci_device *pci_dev)
3854 return rte_eth_dev_pci_generic_probe(pci_dev,
3855 sizeof(struct nfp_net_adapter), nfp_net_init);
3858 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
3860 return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
3863 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
3864 .id_table = pci_id_nfp_pf_net_map,
3865 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3866 .probe = nfp_pf_pci_probe,
3867 .remove = eth_nfp_pci_remove,
3870 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
3871 .id_table = pci_id_nfp_vf_net_map,
3872 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3873 .probe = eth_nfp_pci_probe,
3874 .remove = eth_nfp_pci_remove,
3877 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
3878 RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
3879 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
3880 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
3881 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
3882 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
3883 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE);
3884 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE);
3887 * c-file-style: "Linux"
3888 * indent-tabs-mode: t