2 * Copyright (c) 2014-2018 Netronome Systems, Inc.
5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
35 * vim:shiftwidth=8:noexpandtab
37 * @file dpdk/pmd/nfp_net.c
39 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
45 #include <rte_debug.h>
46 #include <rte_ethdev_driver.h>
47 #include <rte_ethdev_pci.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_memzone.h>
52 #include <rte_mempool.h>
53 #include <rte_version.h>
54 #include <rte_string_fns.h>
55 #include <rte_alarm.h>
56 #include <rte_spinlock.h>
57 #include <rte_service_component.h>
59 #include "nfpcore/nfp_cpp.h"
60 #include "nfpcore/nfp_nffw.h"
61 #include "nfpcore/nfp_hwinfo.h"
62 #include "nfpcore/nfp_mip.h"
63 #include "nfpcore/nfp_rtsym.h"
64 #include "nfpcore/nfp_nsp.h"
66 #include "nfp_net_pmd.h"
67 #include "nfp_net_logs.h"
68 #include "nfp_net_ctrl.h"
70 #include <sys/types.h>
71 #include <sys/socket.h>
75 #include <sys/ioctl.h>
79 static void nfp_net_close(struct rte_eth_dev *dev);
80 static int nfp_net_configure(struct rte_eth_dev *dev);
81 static void nfp_net_dev_interrupt_handler(void *param);
82 static void nfp_net_dev_interrupt_delayed_handler(void *param);
83 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
84 static int nfp_net_infos_get(struct rte_eth_dev *dev,
85 struct rte_eth_dev_info *dev_info);
86 static int nfp_net_init(struct rte_eth_dev *eth_dev);
87 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
88 static int nfp_net_promisc_enable(struct rte_eth_dev *dev);
89 static int nfp_net_promisc_disable(struct rte_eth_dev *dev);
90 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
uint16_t queue_idx);
static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
95 static void nfp_net_rx_queue_release(void *rxq);
96 static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
97 uint16_t nb_desc, unsigned int socket_id,
98 const struct rte_eth_rxconf *rx_conf,
99 struct rte_mempool *mp);
100 static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
101 static void nfp_net_tx_queue_release(void *txq);
102 static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
103 uint16_t nb_desc, unsigned int socket_id,
104 const struct rte_eth_txconf *tx_conf);
105 static int nfp_net_start(struct rte_eth_dev *dev);
106 static int nfp_net_stats_get(struct rte_eth_dev *dev,
107 struct rte_eth_stats *stats);
108 static int nfp_net_stats_reset(struct rte_eth_dev *dev);
109 static void nfp_net_stop(struct rte_eth_dev *dev);
static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
113 static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
114 static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
115 struct rte_eth_rss_conf *rss_conf);
116 static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
119 static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
120 struct rte_eth_rss_conf *rss_conf);
121 static int nfp_set_mac_addr(struct rte_eth_dev *dev,
122 struct rte_ether_addr *mac_addr);
124 /* The offset of the queue controller queues in the PCIe Target */
125 #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
127 /* Maximum value which can be added to a queue with one transaction */
128 #define NFP_QCP_MAX_ADD 0x7f
130 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
131 (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
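/*
 * Worked example (illustrative only): assuming NFP_QCP_QUEUE_ADDR_SZ is
 * 0x800 (see nfp_net_pmd.h), the offset of queue controller queue 3 inside
 * the PCIe Target is
 *
 *	NFP_PCIE_QUEUE(3) == 0x80000 + 0x800 * 3 == 0x81800
 *
 * RTE_MBUF_DMA_ADDR_DEFAULT(mb) is simply the IOVA of the mbuf data area
 * once the standard headroom has been skipped.
 */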
133 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
135 NFP_QCP_READ_PTR = 0,
140 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
141 * @q: Base address for queue structure
142 * @ptr: Add to the Read or Write pointer
143 * @val: Value to add to the queue pointer
 * If @val is greater than @NFP_QCP_MAX_ADD, multiple writes are performed.
148 nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
152 if (ptr == NFP_QCP_READ_PTR)
153 off = NFP_QCP_QUEUE_ADD_RPTR;
155 off = NFP_QCP_QUEUE_ADD_WPTR;
157 while (val > NFP_QCP_MAX_ADD) {
158 nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
159 val -= NFP_QCP_MAX_ADD;
162 nn_writel(rte_cpu_to_le_32(val), q + off);
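/*
 * Usage sketch (illustrative): values larger than NFP_QCP_MAX_ADD are split
 * into several writes by the helper above, so a caller can simply do
 *
 *	nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, 0x100);
 *
 * which ends up as two writes of 0x7f followed by one write of 0x02.
 */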
166 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
167 * @q: Base address for queue structure
168 * @ptr: Read or Write pointer
170 static inline uint32_t
171 nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
176 if (ptr == NFP_QCP_READ_PTR)
177 off = NFP_QCP_QUEUE_STS_LO;
179 off = NFP_QCP_QUEUE_STS_HI;
181 val = rte_cpu_to_le_32(nn_readl(q + off));
183 if (ptr == NFP_QCP_READ_PTR)
184 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
186 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
190 * Functions to read/write from/to Config BAR
191 * Performs any endian conversion necessary.
193 static inline uint8_t
194 nn_cfg_readb(struct nfp_net_hw *hw, int off)
196 return nn_readb(hw->ctrl_bar + off);
200 nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
202 nn_writeb(val, hw->ctrl_bar + off);
205 static inline uint32_t
206 nn_cfg_readl(struct nfp_net_hw *hw, int off)
208 return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
212 nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
214 nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
217 static inline uint64_t
218 nn_cfg_readq(struct nfp_net_hw *hw, int off)
220 return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
224 nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
226 nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
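/*
 * Usage sketch (illustrative): these accessors hide the endianness of the
 * config BAR, so callers read and write fields in host byte order, e.g.
 *
 *	uint32_t cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 *	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
 *
 * with the offsets coming from nfp_net_ctrl.h.
 */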
230 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
234 if (rxq->rxbufs == NULL)
237 for (i = 0; i < rxq->rx_count; i++) {
238 if (rxq->rxbufs[i].mbuf) {
239 rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
240 rxq->rxbufs[i].mbuf = NULL;
246 nfp_net_rx_queue_release(void *rx_queue)
248 struct nfp_net_rxq *rxq = rx_queue;
251 nfp_net_rx_queue_release_mbufs(rxq);
252 rte_free(rxq->rxbufs);
258 nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
260 nfp_net_rx_queue_release_mbufs(rxq);
266 nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
270 if (txq->txbufs == NULL)
273 for (i = 0; i < txq->tx_count; i++) {
274 if (txq->txbufs[i].mbuf) {
275 rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
276 txq->txbufs[i].mbuf = NULL;
282 nfp_net_tx_queue_release(void *tx_queue)
284 struct nfp_net_txq *txq = tx_queue;
287 nfp_net_tx_queue_release_mbufs(txq);
288 rte_free(txq->txbufs);
294 nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
296 nfp_net_tx_queue_release_mbufs(txq);
302 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
306 struct timespec wait;
308 PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
311 if (hw->qcp_cfg == NULL)
312 rte_panic("Bad configuration queue pointer\n");
314 nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
317 wait.tv_nsec = 1000000;
319 PMD_DRV_LOG(DEBUG, "Polling for update ack...");
321 /* Poll update field, waiting for NFP to ack the config */
322 for (cnt = 0; ; cnt++) {
323 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
326 if (new & NFP_NET_CFG_UPDATE_ERR) {
327 PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
330 if (cnt >= NFP_NET_POLL_TIMEOUT) {
331 PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
332 " %dms", update, cnt);
333 rte_panic("Exiting\n");
nanosleep(&wait, 0); /* wait for 1 ms */
337 PMD_DRV_LOG(DEBUG, "Ack DONE");
342 * Reconfigure the NIC
 * @hw: device to reconfigure
344 * @ctrl: The value for the ctrl field in the BAR config
345 * @update: The value for the update field in the BAR config
347 * Write the update word to the BAR and ping the reconfig queue. Then poll
348 * until the firmware has acknowledged the update by zeroing the update word.
351 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
355 PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
358 rte_spinlock_lock(&hw->reconfig_lock);
360 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
361 nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
365 err = __nfp_net_reconfig(hw, update);
367 rte_spinlock_unlock(&hw->reconfig_lock);
 * Reconfig errors returned here are ones the caller can handle;
 * otherwise, rte_panic is called inside __nfp_net_reconfig.
376 PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
382 * Configure an Ethernet device. This function must be invoked first
383 * before any other function in the Ethernet API. This function can
384 * also be re-invoked when a device is in the stopped state.
387 nfp_net_configure(struct rte_eth_dev *dev)
389 struct rte_eth_conf *dev_conf;
390 struct rte_eth_rxmode *rxmode;
391 struct rte_eth_txmode *txmode;
392 struct nfp_net_hw *hw;
394 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 * A DPDK app sends info about how many queues to use and how
 * those queues need to be configured. The DPDK core uses this to
 * make sure no more queues than those advertised by the driver
 * are requested. This function is called after that internal
 * process.
404 PMD_INIT_LOG(DEBUG, "Configure");
406 dev_conf = &dev->data->dev_conf;
407 rxmode = &dev_conf->rxmode;
408 txmode = &dev_conf->txmode;
410 if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
411 rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
413 /* Checking TX mode */
414 if (txmode->mq_mode) {
415 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
419 /* Checking RX mode */
420 if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
421 !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
422 PMD_INIT_LOG(INFO, "RSS not supported");
430 nfp_net_enable_queues(struct rte_eth_dev *dev)
432 struct nfp_net_hw *hw;
433 uint64_t enabled_queues = 0;
436 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
438 /* Enabling the required TX queues in the device */
439 for (i = 0; i < dev->data->nb_tx_queues; i++)
440 enabled_queues |= (1 << i);
442 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
446 /* Enabling the required RX queues in the device */
447 for (i = 0; i < dev->data->nb_rx_queues; i++)
448 enabled_queues |= (1 << i);
450 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
454 nfp_net_disable_queues(struct rte_eth_dev *dev)
456 struct nfp_net_hw *hw;
457 uint32_t new_ctrl, update = 0;
459 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
461 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
462 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
464 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
465 update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
466 NFP_NET_CFG_UPDATE_MSIX;
468 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
469 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
/* If reconfig fails, avoid changing the hw state */
472 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
479 nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
483 for (i = 0; i < dev->data->nb_rx_queues; i++) {
484 if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
491 nfp_net_params_setup(struct nfp_net_hw *hw)
493 nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
494 nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
498 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
500 hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
503 #define ETH_ADDR_LEN 6
506 nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
510 for (i = 0; i < ETH_ADDR_LEN; i++)
515 nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
517 struct nfp_eth_table *nfp_eth_table;
519 nfp_eth_table = nfp_eth_read_ports(hw->cpp);
 * hw points to port0 private data. We need hw now pointing to the
 * right port's private data.
525 nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
526 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
533 nfp_net_vf_read_mac(struct nfp_net_hw *hw)
537 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
538 memcpy(&hw->mac_addr[0], &tmp, 4);
540 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
541 memcpy(&hw->mac_addr[4], &tmp, 2);
545 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
547 uint32_t mac0 = *(uint32_t *)mac;
550 nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
mac += 4;
mac1 = *(uint16_t *)mac;
554 nn_writew(rte_cpu_to_be_16(mac1),
555 hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
559 nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
561 struct nfp_net_hw *hw;
562 uint32_t update, ctrl;
564 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
565 if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
566 !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
567 PMD_INIT_LOG(INFO, "MAC address unable to change when"
572 if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
573 !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
576 /* Writing new MAC to the specific port BAR address */
577 nfp_net_write_mac(hw, (uint8_t *)mac_addr);
579 /* Signal the NIC about the change */
580 update = NFP_NET_CFG_UPDATE_MACADDR;
582 if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
583 (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
584 ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
585 if (nfp_net_reconfig(hw, ctrl, update) < 0) {
586 PMD_INIT_LOG(INFO, "MAC address update failed");
593 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
594 struct rte_intr_handle *intr_handle)
596 struct nfp_net_hw *hw;
599 if (!intr_handle->intr_vec) {
600 intr_handle->intr_vec =
601 rte_zmalloc("intr_vec",
602 dev->data->nb_rx_queues * sizeof(int), 0);
603 if (!intr_handle->intr_vec) {
604 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
605 " intr_vec", dev->data->nb_rx_queues);
610 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
612 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
613 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
/* UIO supports just one queue and no LSC */
615 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
616 intr_handle->intr_vec[0] = 0;
618 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
619 for (i = 0; i < dev->data->nb_rx_queues; i++) {
 * The first MSI-X vector is reserved for non-queue interrupts (e.g. LSC).
624 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
625 intr_handle->intr_vec[i] = i + 1;
626 PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
627 intr_handle->intr_vec[i]);
631 /* Avoiding TX interrupts */
632 hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
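/*
 * Resulting vector layout (illustrative, VFIO with 4 RX queues): MSI-X
 * vector 0 stays reserved for link state change interrupts, while vectors
 * 1..4 service RX queues 0..3, i.e. intr_handle->intr_vec[i] == i + 1.
 */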
637 nfp_check_offloads(struct rte_eth_dev *dev)
639 struct nfp_net_hw *hw;
640 struct rte_eth_conf *dev_conf;
641 struct rte_eth_rxmode *rxmode;
642 struct rte_eth_txmode *txmode;
645 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
647 dev_conf = &dev->data->dev_conf;
648 rxmode = &dev_conf->rxmode;
649 txmode = &dev_conf->txmode;
651 if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
652 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
653 ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
656 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
657 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
658 ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
661 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
662 hw->mtu = rxmode->max_rx_pkt_len;
664 if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
665 ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
668 if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
669 ctrl |= NFP_NET_CFG_CTRL_L2BC;
672 if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
673 ctrl |= NFP_NET_CFG_CTRL_L2MC;
675 /* TX checksum offload */
676 if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
677 txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
678 txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
679 ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
682 if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
683 if (hw->cap & NFP_NET_CFG_CTRL_LSO)
684 ctrl |= NFP_NET_CFG_CTRL_LSO;
686 ctrl |= NFP_NET_CFG_CTRL_LSO2;
690 if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
691 ctrl |= NFP_NET_CFG_CTRL_GATHER;
697 nfp_net_start(struct rte_eth_dev *dev)
699 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
700 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
701 uint32_t new_ctrl, update = 0;
702 struct nfp_net_hw *hw;
703 struct rte_eth_conf *dev_conf;
704 struct rte_eth_rxmode *rxmode;
705 uint32_t intr_vector;
708 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
710 PMD_INIT_LOG(DEBUG, "Start");
712 /* Disabling queues just in case... */
713 nfp_net_disable_queues(dev);
715 /* Enabling the required queues in the device */
716 nfp_net_enable_queues(dev);
718 /* check and configure queue intr-vector mapping */
719 if (dev->data->dev_conf.intr_conf.rxq != 0) {
720 if (hw->pf_multiport_enabled) {
721 PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
722 "with NFP multiport PF");
725 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
727 * Better not to share LSC with RX interrupts.
728 * Unregistering LSC interrupt handler
730 rte_intr_callback_unregister(&pci_dev->intr_handle,
731 nfp_net_dev_interrupt_handler, (void *)dev);
733 if (dev->data->nb_rx_queues > 1) {
734 PMD_INIT_LOG(ERR, "PMD rx interrupt only "
735 "supports 1 queue with UIO");
739 intr_vector = dev->data->nb_rx_queues;
740 if (rte_intr_efd_enable(intr_handle, intr_vector))
743 nfp_configure_rx_interrupt(dev, intr_handle);
744 update = NFP_NET_CFG_UPDATE_MSIX;
747 rte_intr_enable(intr_handle);
749 new_ctrl = nfp_check_offloads(dev);
751 /* Writing configuration parameters in the device */
752 nfp_net_params_setup(hw);
754 dev_conf = &dev->data->dev_conf;
755 rxmode = &dev_conf->rxmode;
757 if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
758 nfp_net_rss_config_default(dev);
759 update |= NFP_NET_CFG_UPDATE_RSS;
760 new_ctrl |= NFP_NET_CFG_CTRL_RSS;
764 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
766 update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
768 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
769 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
771 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
772 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
 * Allocating rte mbufs for configured rx queues.
 * This requires queues to be enabled beforehand.
779 if (nfp_net_rx_freelist_setup(dev) < 0) {
785 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
786 /* Configure the physical port up */
787 nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
789 nfp_eth_set_configured(dev->process_private,
 * An error returned by this function should mean the app exits and
 * the system then releases all the allocated memory, including memory
 * coming from hugepages.
 *
 * The device could be enabled at this point with some queues
 * ready for getting packets. This is true if the call to
 * nfp_net_rx_freelist_setup() succeeds for some queues but
 * fails for subsequent queues.
 *
 * This should make the app exit, but it is better if we tell the
 * device first.
811 nfp_net_disable_queues(dev);
816 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
818 nfp_net_stop(struct rte_eth_dev *dev)
821 struct nfp_net_hw *hw;
823 PMD_INIT_LOG(DEBUG, "Stop");
825 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
827 nfp_net_disable_queues(dev);
830 for (i = 0; i < dev->data->nb_tx_queues; i++) {
831 nfp_net_reset_tx_queue(
832 (struct nfp_net_txq *)dev->data->tx_queues[i]);
835 for (i = 0; i < dev->data->nb_rx_queues; i++) {
836 nfp_net_reset_rx_queue(
837 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
841 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
842 /* Configure the physical port down */
843 nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
845 nfp_eth_set_configured(dev->process_private,
850 /* Set the link up. */
852 nfp_net_set_link_up(struct rte_eth_dev *dev)
854 struct nfp_net_hw *hw;
856 PMD_DRV_LOG(DEBUG, "Set link up");
858 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
863 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
/* Configure the physical port up */
865 return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
867 return nfp_eth_set_configured(dev->process_private,
871 /* Set the link down. */
873 nfp_net_set_link_down(struct rte_eth_dev *dev)
875 struct nfp_net_hw *hw;
877 PMD_DRV_LOG(DEBUG, "Set link down");
879 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
884 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
885 /* Configure the physical port down */
886 return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
888 return nfp_eth_set_configured(dev->process_private,
892 /* Reset and stop device. The device can not be restarted. */
894 nfp_net_close(struct rte_eth_dev *dev)
896 struct nfp_net_hw *hw;
897 struct rte_pci_device *pci_dev;
900 PMD_INIT_LOG(DEBUG, "Close");
902 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
903 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
906 * We assume that the DPDK application is stopping all the
907 * threads/queues before calling the device close function.
910 nfp_net_disable_queues(dev);
913 for (i = 0; i < dev->data->nb_tx_queues; i++) {
914 nfp_net_reset_tx_queue(
915 (struct nfp_net_txq *)dev->data->tx_queues[i]);
918 for (i = 0; i < dev->data->nb_rx_queues; i++) {
919 nfp_net_reset_rx_queue(
920 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
923 rte_intr_disable(&pci_dev->intr_handle);
924 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
926 /* unregister callback func from eal lib */
927 rte_intr_callback_unregister(&pci_dev->intr_handle,
928 nfp_net_dev_interrupt_handler,
932 * The ixgbe PMD driver disables the pcie master on the
933 * device. The i40e does not...
938 nfp_net_promisc_enable(struct rte_eth_dev *dev)
940 uint32_t new_ctrl, update = 0;
941 struct nfp_net_hw *hw;
944 PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
946 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
948 if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
949 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
953 if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
954 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
958 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
959 update = NFP_NET_CFG_UPDATE_GEN;
 * DPDK sets promiscuous mode on just after this call, assuming
 * it cannot fail ...
965 ret = nfp_net_reconfig(hw, new_ctrl, update);
975 nfp_net_promisc_disable(struct rte_eth_dev *dev)
977 uint32_t new_ctrl, update = 0;
978 struct nfp_net_hw *hw;
981 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
983 if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
984 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
988 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
989 update = NFP_NET_CFG_UPDATE_GEN;
 * DPDK sets promiscuous mode off just before this call,
 * assuming it cannot fail ...
995 ret = nfp_net_reconfig(hw, new_ctrl, update);
1005 * return 0 means link status changed, -1 means not changed
 * Wait to complete is needed as it can take up to 9 seconds to get the link
 * status.
1011 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
1013 struct nfp_net_hw *hw;
1014 struct rte_eth_link link;
1015 uint32_t nn_link_status;
1018 static const uint32_t ls_to_ethtool[] = {
1019 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
1020 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE,
1021 [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G,
1022 [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G,
1023 [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G,
1024 [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G,
1025 [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G,
1026 [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
1029 PMD_DRV_LOG(DEBUG, "Link update");
1031 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1033 nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
1035 memset(&link, 0, sizeof(struct rte_eth_link));
1037 if (nn_link_status & NFP_NET_CFG_STS_LINK)
1038 link.link_status = ETH_LINK_UP;
1040 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1042 nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
1043 NFP_NET_CFG_STS_LINK_RATE_MASK;
1045 if (nn_link_status >= RTE_DIM(ls_to_ethtool))
1046 link.link_speed = ETH_SPEED_NUM_NONE;
1048 link.link_speed = ls_to_ethtool[nn_link_status];
1050 ret = rte_eth_linkstatus_set(dev, &link);
1052 if (link.link_status)
1053 PMD_DRV_LOG(INFO, "NIC Link is Up");
1055 PMD_DRV_LOG(INFO, "NIC Link is Down");
1061 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1064 struct nfp_net_hw *hw;
1065 struct rte_eth_stats nfp_dev_stats;
1067 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1069 /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
1071 memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
1073 /* reading per RX ring stats */
1074 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1075 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1078 nfp_dev_stats.q_ipackets[i] =
1079 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1081 nfp_dev_stats.q_ipackets[i] -=
1082 hw->eth_stats_base.q_ipackets[i];
1084 nfp_dev_stats.q_ibytes[i] =
1085 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1087 nfp_dev_stats.q_ibytes[i] -=
1088 hw->eth_stats_base.q_ibytes[i];
1091 /* reading per TX ring stats */
1092 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1093 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1096 nfp_dev_stats.q_opackets[i] =
1097 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1099 nfp_dev_stats.q_opackets[i] -=
1100 hw->eth_stats_base.q_opackets[i];
1102 nfp_dev_stats.q_obytes[i] =
1103 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1105 nfp_dev_stats.q_obytes[i] -=
1106 hw->eth_stats_base.q_obytes[i];
1109 nfp_dev_stats.ipackets =
1110 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1112 nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
1114 nfp_dev_stats.ibytes =
1115 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1117 nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
1119 nfp_dev_stats.opackets =
1120 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1122 nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
1124 nfp_dev_stats.obytes =
1125 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1127 nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
1129 /* reading general device stats */
1130 nfp_dev_stats.ierrors =
1131 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1133 nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
1135 nfp_dev_stats.oerrors =
1136 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1138 nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
1140 /* RX ring mbuf allocation failures */
1141 nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1143 nfp_dev_stats.imissed =
1144 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1146 nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
1149 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
1156 nfp_net_stats_reset(struct rte_eth_dev *dev)
1159 struct nfp_net_hw *hw;
1161 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1164 * hw->eth_stats_base records the per counter starting point.
 * Let's update it now.
1168 /* reading per RX ring stats */
1169 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1170 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1173 hw->eth_stats_base.q_ipackets[i] =
1174 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1176 hw->eth_stats_base.q_ibytes[i] =
1177 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1180 /* reading per TX ring stats */
1181 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1182 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1185 hw->eth_stats_base.q_opackets[i] =
1186 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1188 hw->eth_stats_base.q_obytes[i] =
1189 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1192 hw->eth_stats_base.ipackets =
1193 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1195 hw->eth_stats_base.ibytes =
1196 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1198 hw->eth_stats_base.opackets =
1199 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1201 hw->eth_stats_base.obytes =
1202 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1204 /* reading general device stats */
1205 hw->eth_stats_base.ierrors =
1206 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1208 hw->eth_stats_base.oerrors =
1209 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1211 /* RX ring mbuf allocation failures */
1212 dev->data->rx_mbuf_alloc_failed = 0;
1214 hw->eth_stats_base.imissed =
1215 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1221 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1223 struct nfp_net_hw *hw;
1225 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1227 dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1228 dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1229 dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1230 dev_info->max_rx_pktlen = hw->max_mtu;
1231 /* Next should change when PF support is implemented */
1232 dev_info->max_mac_addrs = 1;
1234 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
1235 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1237 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
1238 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
1239 DEV_RX_OFFLOAD_UDP_CKSUM |
1240 DEV_RX_OFFLOAD_TCP_CKSUM;
1242 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
1243 DEV_RX_OFFLOAD_RSS_HASH;
1245 if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
1246 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
1248 if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
1249 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
1250 DEV_TX_OFFLOAD_UDP_CKSUM |
1251 DEV_TX_OFFLOAD_TCP_CKSUM;
1253 if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
1254 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
1256 if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
1257 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
1259 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1261 .pthresh = DEFAULT_RX_PTHRESH,
1262 .hthresh = DEFAULT_RX_HTHRESH,
1263 .wthresh = DEFAULT_RX_WTHRESH,
1265 .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1269 dev_info->default_txconf = (struct rte_eth_txconf) {
1271 .pthresh = DEFAULT_TX_PTHRESH,
1272 .hthresh = DEFAULT_TX_HTHRESH,
1273 .wthresh = DEFAULT_TX_WTHRESH,
1275 .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1276 .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1279 dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1280 ETH_RSS_NONFRAG_IPV4_TCP |
1281 ETH_RSS_NONFRAG_IPV4_UDP |
1283 ETH_RSS_NONFRAG_IPV6_TCP |
1284 ETH_RSS_NONFRAG_IPV6_UDP;
1286 dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1287 dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1289 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1290 ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
1291 ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
1296 static const uint32_t *
1297 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
1299 static const uint32_t ptypes[] = {
1300 /* refers to nfp_net_set_hash() */
1301 RTE_PTYPE_INNER_L3_IPV4,
1302 RTE_PTYPE_INNER_L3_IPV6,
1303 RTE_PTYPE_INNER_L3_IPV6_EXT,
1304 RTE_PTYPE_INNER_L4_MASK,
1308 if (dev->rx_pkt_burst == nfp_net_recv_pkts)
1314 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
1316 struct nfp_net_rxq *rxq;
1317 struct nfp_net_rx_desc *rxds;
1321 rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
 * Other PMDs just check the DD bit in intervals of 4
 * descriptors and count all four if the first one has the DD
 * bit on. Of course, this is not accurate, but it can be good for
 * performance. Ideally, that should be done on descriptor
 * chunks belonging to the same cache line.
1335 while (count < rxq->rx_count) {
1336 rxds = &rxq->rxds[idx];
1337 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
1344 if ((idx) == rxq->rx_count)
1352 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1354 struct rte_pci_device *pci_dev;
1355 struct nfp_net_hw *hw;
1358 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1359 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1361 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1364 /* Make sure all updates are written before un-masking */
1366 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
1367 NFP_NET_CFG_ICR_UNMASKED);
1372 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1374 struct rte_pci_device *pci_dev;
1375 struct nfp_net_hw *hw;
1378 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1379 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1381 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1384 /* Make sure all updates are written before un-masking */
1386 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
1391 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1393 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1394 struct rte_eth_link link;
1396 rte_eth_linkstatus_get(dev, &link);
1397 if (link.link_status)
1398 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1399 dev->data->port_id, link.link_speed,
1400 link.link_duplex == ETH_LINK_FULL_DUPLEX
1401 ? "full-duplex" : "half-duplex");
1403 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1404 dev->data->port_id);
1406 PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
1407 pci_dev->addr.domain, pci_dev->addr.bus,
1408 pci_dev->addr.devid, pci_dev->addr.function);
1411 /* Interrupt configuration and handling */
1414 * nfp_net_irq_unmask - Unmask an interrupt
1416 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1417 * clear the ICR for the entry.
1420 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1422 struct nfp_net_hw *hw;
1423 struct rte_pci_device *pci_dev;
1425 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1426 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1428 if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
1429 /* If MSI-X auto-masking is used, clear the entry */
1431 rte_intr_ack(&pci_dev->intr_handle);
1433 /* Make sure all updates are written before un-masking */
1435 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1436 NFP_NET_CFG_ICR_UNMASKED);
1441 nfp_net_dev_interrupt_handler(void *param)
1444 struct rte_eth_link link;
1445 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1447 PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");
1449 rte_eth_linkstatus_get(dev, &link);
1451 nfp_net_link_update(dev, 0);
1454 if (!link.link_status) {
/* handle it 1 sec later, wait for it to become stable */
1456 timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1457 /* likely to down */
/* handle it 4 sec later, wait for it to become stable */
1460 timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1463 if (rte_eal_alarm_set(timeout * 1000,
1464 nfp_net_dev_interrupt_delayed_handler,
1466 PMD_INIT_LOG(ERR, "Error setting alarm");
1468 nfp_net_irq_unmask(dev);
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, waiting for the NIC state to
 * become stable. As the NFP interrupt state is not stable right after the
 * link goes down, it needs to wait 4 seconds to get the stable status.
1478 * @param handle Pointer to interrupt handle.
1479 * @param param The address of parameter (struct rte_eth_dev *)
1484 nfp_net_dev_interrupt_delayed_handler(void *param)
1486 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1488 nfp_net_link_update(dev, 0);
1489 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1491 nfp_net_dev_link_status_print(dev);
1494 nfp_net_irq_unmask(dev);
1498 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1500 struct nfp_net_hw *hw;
1502 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1504 /* check that mtu is within the allowed range */
1505 if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)
1508 /* mtu setting is forbidden if port is started */
1509 if (dev->data->dev_started) {
1510 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1511 dev->data->port_id);
1515 /* switch to jumbo mode if needed */
1516 if ((uint32_t)mtu > RTE_ETHER_MAX_LEN)
1517 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1519 dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1521 /* update max frame size */
1522 dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
1524 /* writing to configuration space */
1525 nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
1533 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
1534 uint16_t queue_idx, uint16_t nb_desc,
1535 unsigned int socket_id,
1536 const struct rte_eth_rxconf *rx_conf,
1537 struct rte_mempool *mp)
1539 const struct rte_memzone *tz;
1540 struct nfp_net_rxq *rxq;
1541 struct nfp_net_hw *hw;
1543 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1545 PMD_INIT_FUNC_TRACE();
1547 /* Validating number of descriptors */
1548 if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
1549 (nb_desc > NFP_NET_MAX_RX_DESC) ||
1550 (nb_desc < NFP_NET_MIN_RX_DESC)) {
1551 PMD_DRV_LOG(ERR, "Wrong nb_desc value");
1556 * Free memory prior to re-allocation if needed. This is the case after
1557 * calling nfp_net_stop
1559 if (dev->data->rx_queues[queue_idx]) {
1560 nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
1561 dev->data->rx_queues[queue_idx] = NULL;
1564 /* Allocating rx queue data structure */
1565 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
1566 RTE_CACHE_LINE_SIZE, socket_id);
1570 /* Hw queues mapping based on firmware configuration */
1571 rxq->qidx = queue_idx;
1572 rxq->fl_qcidx = queue_idx * hw->stride_rx;
1573 rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
1574 rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
1575 rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
1578 * Tracking mbuf size for detecting a potential mbuf overflow due to
1582 rxq->mbuf_size = rxq->mem_pool->elt_size;
1583 rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
1584 hw->flbufsz = rxq->mbuf_size;
1586 rxq->rx_count = nb_desc;
1587 rxq->port_id = dev->data->port_id;
1588 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1589 rxq->drop_en = rx_conf->rx_drop_en;
1592 * Allocate RX ring hardware descriptors. A memzone large enough to
1593 * handle the maximum ring size is allocated in order to allow for
1594 * resizing in later calls to the queue setup function.
1596 tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1597 sizeof(struct nfp_net_rx_desc) *
1598 NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
1602 PMD_DRV_LOG(ERR, "Error allocating rx dma");
1603 nfp_net_rx_queue_release(rxq);
1607 /* Saving physical and virtual addresses for the RX ring */
1608 rxq->dma = (uint64_t)tz->iova;
1609 rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
1611 /* mbuf pointers array for referencing mbufs linked to RX descriptors */
1612 rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
1613 sizeof(*rxq->rxbufs) * nb_desc,
1614 RTE_CACHE_LINE_SIZE, socket_id);
1615 if (rxq->rxbufs == NULL) {
1616 nfp_net_rx_queue_release(rxq);
1620 PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
1621 rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
1623 nfp_net_reset_rx_queue(rxq);
1625 dev->data->rx_queues[queue_idx] = rxq;
1629 * Telling the HW about the physical address of the RX ring and number
1630 * of descriptors in log2 format
1632 nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
1633 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
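/*
 * Example (illustrative): a queue created with nb_desc = 1024 passes the
 * descriptor ring size check above and is reported to the firmware as
 * rte_log2_u32(1024) == 10 in the ring size register.
 */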
1639 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
1641 struct nfp_net_rx_buff *rxe = rxq->rxbufs;
1645 PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
1648 for (i = 0; i < rxq->rx_count; i++) {
1649 struct nfp_net_rx_desc *rxd;
1650 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
1653 PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
1654 (unsigned)rxq->qidx);
1658 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
1660 rxd = &rxq->rxds[i];
1662 rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
1663 rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
1665 PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
1668 /* Make sure all writes are flushed before telling the hardware */
1671 /* Not advertising the whole ring as the firmware gets confused if so */
1672 PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
1675 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
1681 nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1682 uint16_t nb_desc, unsigned int socket_id,
1683 const struct rte_eth_txconf *tx_conf)
1685 const struct rte_memzone *tz;
1686 struct nfp_net_txq *txq;
1687 uint16_t tx_free_thresh;
1688 struct nfp_net_hw *hw;
1690 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1692 PMD_INIT_FUNC_TRACE();
1694 /* Validating number of descriptors */
1695 if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
1696 (nb_desc > NFP_NET_MAX_TX_DESC) ||
1697 (nb_desc < NFP_NET_MIN_TX_DESC)) {
1698 PMD_DRV_LOG(ERR, "Wrong nb_desc value");
1702 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1703 tx_conf->tx_free_thresh :
1704 DEFAULT_TX_FREE_THRESH);
1706 if (tx_free_thresh > (nb_desc)) {
1708 "tx_free_thresh must be less than the number of TX "
1709 "descriptors. (tx_free_thresh=%u port=%d "
1710 "queue=%d)", (unsigned int)tx_free_thresh,
1711 dev->data->port_id, (int)queue_idx);
1716 * Free memory prior to re-allocation if needed. This is the case after
1717 * calling nfp_net_stop
1719 if (dev->data->tx_queues[queue_idx]) {
1720 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1722 nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
1723 dev->data->tx_queues[queue_idx] = NULL;
1726 /* Allocating tx queue data structure */
1727 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
1728 RTE_CACHE_LINE_SIZE, socket_id);
1730 PMD_DRV_LOG(ERR, "Error allocating tx dma");
1735 * Allocate TX ring hardware descriptors. A memzone large enough to
1736 * handle the maximum ring size is allocated in order to allow for
1737 * resizing in later calls to the queue setup function.
1739 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1740 sizeof(struct nfp_net_tx_desc) *
1741 NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
1744 PMD_DRV_LOG(ERR, "Error allocating tx dma");
1745 nfp_net_tx_queue_release(txq);
1749 txq->tx_count = nb_desc;
1750 txq->tx_free_thresh = tx_free_thresh;
1751 txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
1752 txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
1753 txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
1755 /* queue mapping based on firmware configuration */
1756 txq->qidx = queue_idx;
1757 txq->tx_qcidx = queue_idx * hw->stride_tx;
1758 txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
1760 txq->port_id = dev->data->port_id;
1762 /* Saving physical and virtual addresses for the TX ring */
1763 txq->dma = (uint64_t)tz->iova;
1764 txq->txds = (struct nfp_net_tx_desc *)tz->addr;
1766 /* mbuf pointers array for referencing mbufs linked to TX descriptors */
1767 txq->txbufs = rte_zmalloc_socket("txq->txbufs",
1768 sizeof(*txq->txbufs) * nb_desc,
1769 RTE_CACHE_LINE_SIZE, socket_id);
1770 if (txq->txbufs == NULL) {
1771 nfp_net_tx_queue_release(txq);
1774 PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
1775 txq->txbufs, txq->txds, (unsigned long int)txq->dma);
1777 nfp_net_reset_tx_queue(txq);
1779 dev->data->tx_queues[queue_idx] = txq;
1783 * Telling the HW about the physical address of the TX ring and number
1784 * of descriptors in log2 format
1786 nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
1787 nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1792 /* nfp_net_tx_tso - Set TX descriptor for TSO */
1794 nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1795 struct rte_mbuf *mb)
1798 struct nfp_net_hw *hw = txq->hw;
1800 if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
1803 ol_flags = mb->ol_flags;
1805 if (!(ol_flags & PKT_TX_TCP_SEG))
1808 txd->l3_offset = mb->l2_len;
1809 txd->l4_offset = mb->l2_len + mb->l3_len;
1810 txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
1811 txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
1812 txd->flags = PCIE_DESC_TX_LSO;
1819 txd->lso_hdrlen = 0;
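/*
 * Worked example (illustrative): for a TSO'd TCP/IPv4 packet with a plain
 * Ethernet header, mb->l2_len = 14, mb->l3_len = 20 and mb->l4_len = 20,
 * so the descriptor above gets l3_offset = 14, l4_offset = 34,
 * lso_hdrlen = 54 and mss = mb->tso_segsz as set by the application.
 */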
1823 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
1825 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1826 struct rte_mbuf *mb)
1829 struct nfp_net_hw *hw = txq->hw;
1831 if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
1834 ol_flags = mb->ol_flags;
/* Only IPv4 needs the IP header checksum offload; IPv6 has none */
1837 if (ol_flags & PKT_TX_IP_CKSUM)
1838 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
1840 switch (ol_flags & PKT_TX_L4_MASK) {
1841 case PKT_TX_UDP_CKSUM:
1842 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
1844 case PKT_TX_TCP_CKSUM:
1845 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
1849 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
1850 txd->flags |= PCIE_DESC_TX_CSUM;
1853 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
1855 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1856 struct rte_mbuf *mb)
1858 struct nfp_net_hw *hw = rxq->hw;
1860 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
/* If the packet is IPv4 and the IP checksum is bad, flag it */
1864 if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
1865 !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
1866 mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1868 mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
/* If neither UDP nor TCP, return */
1871 if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1872 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
1875 if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
1876 mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1878 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1881 #define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
1882 #define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
1884 #define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
1887 * nfp_net_set_hash - Set mbuf hash data
1889 * The RSS hash and hash-type are pre-pended to the packet data.
1890 * Extract and decode it and set the mbuf fields.
1893 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1894 struct rte_mbuf *mbuf)
1896 struct nfp_net_hw *hw = rxq->hw;
1897 uint8_t *meta_offset;
1900 uint32_t hash_type = 0;
1902 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
/* this is true for new firmware */
1906 if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
1907 (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
1908 NFP_DESC_META_LEN(rxd))) {
1911 * <---- 32 bit ----->
1916 * ====================
1919 * Field type word contains up to 8 4bit field types
1920 * A 4bit field type refers to a data field word
1921 * A data field word can have several 4bit field types
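 *
 * Illustrative decode (hypothetical values): with a metadata length of 8
 * bytes, the first prepended word is the field type word, whose lowest
 * 4-bit field is NFP_NET_META_HASH and whose next 4-bit field carries the
 * RSS hash type; the following data field word holds the 32-bit hash value
 * that is stored into mbuf->hash.rss below.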
1923 meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
1924 meta_offset -= NFP_DESC_META_LEN(rxd);
1925 meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1927 /* NFP PMD just supports metadata for hashing */
1928 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1929 case NFP_NET_META_HASH:
1930 /* next field type is about the hash type */
1931 meta_info >>= NFP_NET_META_FIELD_SIZE;
1932 /* hash value is in the data field */
1933 hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1934 hash_type = meta_info & NFP_NET_META_FIELD_MASK;
1937 /* Unsupported metadata can be a performance issue */
1941 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1944 hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
1945 hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
1948 mbuf->hash.rss = hash;
1949 mbuf->ol_flags |= PKT_RX_RSS_HASH;
1951 switch (hash_type) {
case NFP_NET_RSS_IPV4:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
break;
case NFP_NET_RSS_IPV6:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
break;
case NFP_NET_RSS_IPV6_EX:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
break;
case NFP_NET_RSS_IPV4_TCP:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
break;
case NFP_NET_RSS_IPV6_TCP:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
break;
case NFP_NET_RSS_IPV4_UDP:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
break;
case NFP_NET_RSS_IPV6_UDP:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
break;
default:
mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
1979 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
1981 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 * There are some decisions to take:
 * 1) How to check the RX descriptor DD bit
 * 2) How and when to allocate new mbufs
 *
 * The current implementation checks just one single DD bit each loop. As each
 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
 * a single cache line instead. Tests with this change have not shown any
 * performance improvement, but it requires further investigation. For example,
 * depending on which descriptor is next, the number of descriptors could be
 * less than 8 for just checking those in the same cache line. This implies
 * extra work which could be counterproductive by itself. Indeed, recent
 * firmware changes are already doing this: writing several descriptors with
 * the DD bit to save PCIe bandwidth and DMA operations from the NFP.
 *
 * Mbuf allocation is done when a new packet is received. Then the descriptor
 * is automatically linked with the new mbuf and the old one is given to the
 * user. The main drawback with this design is that mbuf allocation is heavier
 * than using the bulk allocations allowed by DPDK with rte_mempool_get_bulk.
 * From the cache point of view, allocating the mbuf early, as we do now, does
 * not seem to have any benefit at all. Again, tests with this change have not
 * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing,
 * so the implications of this type of allocation should be studied further.
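 *
 * A bulk refill variant (sketch only, not what this driver does) could use
 * rte_pktmbuf_alloc_bulk(), which is likewise all or nothing:
 *
 *	struct rte_mbuf *bulk[32];
 *	if (rte_pktmbuf_alloc_bulk(rxq->mem_pool, bulk, 32) == 0) {
 *		... refill 32 freelist descriptors from bulk[] ...
 *	}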
2015 nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2017 struct nfp_net_rxq *rxq;
2018 struct nfp_net_rx_desc *rxds;
2019 struct nfp_net_rx_buff *rxb;
2020 struct nfp_net_hw *hw;
2021 struct rte_mbuf *mb;
2022 struct rte_mbuf *new_mb;
2028 if (unlikely(rxq == NULL)) {
2030 * DPDK just checks the queue is lower than max queues
2031 * enabled. But the queue needs to be configured
2033 RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
2041 while (avail < nb_pkts) {
2042 rxb = &rxq->rxbufs[rxq->rd_p];
2043 if (unlikely(rxb == NULL)) {
2044 RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
2048 rxds = &rxq->rxds[rxq->rd_p];
2049 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
2053 * Memory barrier to ensure that we won't do other
2054 * reads before the DD bit.
2059 * We got a packet. Let's alloc a new mbuf for refilling the
2060 * free descriptor ring as soon as possible
2062 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
2063 if (unlikely(new_mb == NULL)) {
2064 RTE_LOG_DP(DEBUG, PMD,
2065 "RX mbuf alloc failed port_id=%u queue_id=%u\n",
2066 rxq->port_id, (unsigned int)rxq->qidx);
2067 nfp_net_mbuf_alloc_failed(rxq);
2074 * Grab the mbuf and refill the descriptor with the
2075 * previously allocated mbuf
2080 PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
2081 rxds->rxd.data_len, rxq->mbuf_size);
2083 /* Size of this segment */
2084 mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2085 /* Size of the whole packet. We just support 1 segment */
2086 mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2088 if (unlikely((mb->data_len + hw->rx_offset) >
2091 * This should not happen and the user has the
2092 * responsibility of avoiding it. But we have
2093 * to give some info about the error
2095 RTE_LOG_DP(ERR, PMD,
2096 "mbuf overflow likely due to the RX offset.\n"
2097 "\t\tYour mbuf size should have extra space for"
2098 " RX offset=%u bytes.\n"
2099 "\t\tCurrently you just have %u bytes available"
2100 " but the received packet is %u bytes long",
2102 rxq->mbuf_size - hw->rx_offset,
2107 /* Filling the received mbuf with packet info */
2109 mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
2111 mb->data_off = RTE_PKTMBUF_HEADROOM +
2112 NFP_DESC_META_LEN(rxds);
2114 /* No scatter mode supported */
2118 mb->port = rxq->port_id;
2120 /* Checking the RSS flag */
2121 nfp_net_set_hash(rxq, rxds, mb);
2123 /* Checking the checksum flag */
2124 nfp_net_rx_cksum(rxq, rxds, mb);
2126 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
2127 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
2128 mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
2129 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2132 /* Adding the mbuf to the mbuf array passed by the app */
2133 rx_pkts[avail++] = mb;
2135 /* Now resetting and updating the descriptor */
2138 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
2140 rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
2141 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
2144 if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
2151 PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received",
2152 rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
2154 nb_hold += rxq->nb_rx_hold;
 * FL descriptors need to be written before incrementing the
 * FL queue WR pointer.
2161 if (nb_hold > rxq->rx_free_thresh) {
2162 PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
2163 rxq->port_id, (unsigned int)rxq->qidx,
2164 (unsigned)nb_hold, (unsigned)avail);
2165 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
2168 rxq->nb_rx_hold = nb_hold;
 * nfp_net_tx_free_bufs - Check for descriptors with a complete status
2176 * @txq: TX queue to work with
2177 * Returns number of descriptors freed
2180 nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
2185 PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
2186 " status", txq->qidx);
2188 /* Work out how many packets have been sent */
2189 qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
2191 if (qcp_rd_p == txq->rd_p) {
2192 PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
2193 "packets (%u, %u)", txq->qidx,
2194 qcp_rd_p, txq->rd_p);
2198 if (qcp_rd_p > txq->rd_p)
2199 todo = qcp_rd_p - txq->rd_p;
2201 todo = qcp_rd_p + txq->tx_count - txq->rd_p;
2203 PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
2204 qcp_rd_p, txq->rd_p, txq->rd_p);
2210 if (unlikely(txq->rd_p >= txq->tx_count))
2211 txq->rd_p -= txq->tx_count;
/* Always leave some free descriptors to avoid wrapping confusion */
2218 uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
2220 if (txq->wr_p >= txq->rd_p)
2221 return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
2223 return txq->rd_p - txq->wr_p - 8;
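/*
 * Worked example (illustrative): with tx_count = 256, wr_p = 10 and
 * rd_p = 250 the ring has wrapped, so the helper above reports
 * 250 - 10 - 8 = 232 usable descriptors; the 8 descriptors kept back make
 * a full ring distinguishable from an empty one.
 */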
 * nfp_net_txq_full - Check if the number of free TX descriptors
 * is below tx_free_threshold
 * @txq: TX queue to check
 * This function uses the host copy of the read/write pointers.
2235 uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
2237 return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
2241 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2243 struct nfp_net_txq *txq;
2244 struct nfp_net_hw *hw;
2245 struct nfp_net_tx_desc *txds, txd;
2246 struct rte_mbuf *pkt;
2248 int pkt_size, dma_size;
2249 uint16_t free_descs, issued_descs;
2250 struct rte_mbuf **lmbuf;
2255 txds = &txq->txds[txq->wr_p];
2257 PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
2258 txq->qidx, txq->wr_p, nb_pkts);
2260 if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
2261 nfp_net_tx_free_bufs(txq);
2263 free_descs = (uint16_t)nfp_free_tx_desc(txq);
2264 if (unlikely(free_descs == 0))
2271 PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
2272 txq->qidx, nb_pkts);
2273 /* Sending packets */
2274 while ((i < nb_pkts) && free_descs) {
2275 /* Grabbing the mbuf linked to the current descriptor */
2276 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2277 /* Warming the cache for releasing the mbuf later on */
2278 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
2280 pkt = *(tx_pkts + i);
2282 if (unlikely((pkt->nb_segs > 1) &&
2283 !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
2284 PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
2285 rte_panic("Multisegment packet unsupported\n");
2288 /* Checking if we have enough descriptors */
2289 if (unlikely(pkt->nb_segs > free_descs))
2293 * Checksum and VLAN flags go only in the first descriptor of a
2294 * multisegment packet, but TSO info needs to be in all of them.
2296 txd.data_len = pkt->pkt_len;
2297 nfp_net_tx_tso(txq, &txd, pkt);
2298 nfp_net_tx_cksum(txq, &txd, pkt);
2300 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
2301 (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
2302 txd.flags |= PCIE_DESC_TX_VLAN;
2303 txd.vlan = pkt->vlan_tci;
2307 * mbuf data_len is the data in one segment and pkt_len is the data
2308 * in the whole packet. When the packet has just one segment,
2309 * data_len equals pkt_len.
2311 pkt_size = pkt->pkt_len;
2314 /* Copying TSO, VLAN and cksum info */
2317 /* Releasing the mbuf previously used by this descriptor */
2319 rte_pktmbuf_free_seg(*lmbuf);
2322 * Link the mbuf with the descriptor so it can be released
2323 * the next time the descriptor is used
2327 dma_size = pkt->data_len;
2328 dma_addr = rte_mbuf_data_iova(pkt);
2329 PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
2330 " %" PRIx64 "", dma_addr);
2332 /* Filling descriptors fields */
2333 txds->dma_len = dma_size;
2334 txds->data_len = txd.data_len;
2335 txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
2336 txds->dma_addr_lo = (dma_addr & 0xffffffff);
2337 ASSERT(free_descs > 0);
2341 if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping? */
2344 pkt_size -= dma_size;
2347 * Making the EOP, packets with just one segment
2350 if (likely(!pkt_size))
2351 txds->offset_eop = PCIE_DESC_TX_EOP;
2353 txds->offset_eop = 0;
2356 /* Referencing next free TX descriptor */
2357 txds = &txq->txds[txq->wr_p];
2358 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2365 /* Increment write pointers. Force memory write before we let HW know */
2367 nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
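/*
 * The memory barrier referred to above must complete before this QCP
 * write-pointer add: the add acts as the doorbell telling the hardware that
 * issued_descs new descriptors are valid, so their contents have to be
 * globally visible first.
 */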
2373 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2375 uint32_t new_ctrl, update;
2376 struct nfp_net_hw *hw;
2379 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2382 if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
2383 (mask & ETH_VLAN_EXTEND_OFFLOAD))
2384 PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or"
2385 " ETH_VLAN_EXTEND_OFFLOAD");
2387 /* Enable vlan strip if it is not configured yet */
2388 if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
2389 !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2390 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
2392 /* Disable vlan strip only if it is configured */
2393 if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
2394 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2395 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
2400 update = NFP_NET_CFG_UPDATE_GEN;
2402 ret = nfp_net_reconfig(hw, new_ctrl, update);
2404 hw->ctrl = new_ctrl;
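/*
 * Reconfiguration handshake (as used above): the new control word is written
 * together with NFP_NET_CFG_UPDATE_GEN, nfp_net_reconfig() waits for the
 * firmware to pick the update up, and the cached hw->ctrl is then refreshed
 * so the host view matches what was programmed.
 */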
2410 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
2411 struct rte_eth_rss_reta_entry64 *reta_conf,
2414 uint32_t reta, mask;
2417 struct nfp_net_hw *hw =
2418 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2420 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2421 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2422 "(%d) doesn't match the number hardware can supported "
2423 "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2428 * Update Redirection Table. There are 128 8-bit entries which can be
2429 * managed as 32 32-bit entries
2431 for (i = 0; i < reta_size; i += 4) {
2432 /* Handling 4 RSS entries per loop */
2433 idx = i / RTE_RETA_GROUP_SIZE;
2434 shift = i % RTE_RETA_GROUP_SIZE;
2435 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2441 /* If all 4 entries were set, no need to read the RETA register */
2443 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
2445 for (j = 0; j < 4; j++) {
2446 if (!(mask & (0x1 << j)))
2449 /* Clearing the entry bits */
2450 reta &= ~(0xFF << (8 * j));
2451 reta |= reta_conf[idx].reta[shift + j] << (8 * j);
2453 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
2459 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
2461 nfp_net_reta_update(struct rte_eth_dev *dev,
2462 struct rte_eth_rss_reta_entry64 *reta_conf,
2465 struct nfp_net_hw *hw =
2466 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2470 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2473 ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
2477 update = NFP_NET_CFG_UPDATE_RSS;
2479 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2485 /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
2487 nfp_net_reta_query(struct rte_eth_dev *dev,
2488 struct rte_eth_rss_reta_entry64 *reta_conf,
2494 struct nfp_net_hw *hw;
2496 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2498 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2501 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2502 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2503 "(%d) doesn't match the number hardware can supported "
2504 "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2509 * Reading Redirection Table. There are 128 8-bit entries which can be
2510 * managed as 32 32-bit entries
2512 for (i = 0; i < reta_size; i += 4) {
2513 /* Handling 4 RSS entries per loop */
2514 idx = i / RTE_RETA_GROUP_SIZE;
2515 shift = i % RTE_RETA_GROUP_SIZE;
2516 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2521 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
2523 for (j = 0; j < 4; j++) {
2524 if (!(mask & (0x1 << j)))
2526 reta_conf[idx].reta[shift + j] =
2527 (uint8_t)((reta >> (8 * j)) & 0xFF);
2534 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
2535 struct rte_eth_rss_conf *rss_conf)
2537 struct nfp_net_hw *hw;
2539 uint32_t cfg_rss_ctrl = 0;
2543 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2545 /* Writing the key byte by byte */
2546 for (i = 0; i < rss_conf->rss_key_len; i++) {
2547 memcpy(&key, &rss_conf->rss_key[i], 1);
2548 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
2551 rss_hf = rss_conf->rss_hf;
2553 if (rss_hf & ETH_RSS_IPV4)
2554 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
2556 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2557 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
2559 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2560 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
2562 if (rss_hf & ETH_RSS_IPV6)
2563 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
2565 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2566 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
2568 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2569 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
2571 cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
2572 cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
2574 /* configuring where to apply the RSS hash */
2575 nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
2577 /* Writing the key size */
2578 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
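/*
 * Order of the writes above: hash key byte by byte, then the RSS control
 * word selecting the fields to hash (with Toeplitz as the hash function),
 * then the key length. The change only takes effect once the caller raises
 * NFP_NET_CFG_UPDATE_RSS, as nfp_net_rss_hash_update() does below.
 */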
2584 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
2585 struct rte_eth_rss_conf *rss_conf)
2589 struct nfp_net_hw *hw;
2591 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2593 rss_hf = rss_conf->rss_hf;
2595 /* Checking if RSS is enabled */
2596 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
2597 if (rss_hf != 0) { /* Enable RSS? */
2598 PMD_DRV_LOG(ERR, "RSS unsupported");
2601 return 0; /* Nothing to do */
2604 if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
2605 PMD_DRV_LOG(ERR, "hash key too long");
2609 nfp_net_rss_hash_write(dev, rss_conf);
2611 update = NFP_NET_CFG_UPDATE_RSS;
2613 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2620 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2621 struct rte_eth_rss_conf *rss_conf)
2624 uint32_t cfg_rss_ctrl;
2627 struct nfp_net_hw *hw;
2629 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2631 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2634 rss_hf = rss_conf->rss_hf;
2635 cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2637 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
2638 rss_hf |= ETH_RSS_IPV4;
2640 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
2641 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2643 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
2644 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2646 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
2647 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2649 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
2650 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2652 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
2653 rss_hf |= ETH_RSS_IPV6;
2655 /* Reading the key size */
2656 rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2658 /* Reading the key byte by byte */
2659 for (i = 0; i < rss_conf->rss_key_len; i++) {
2660 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2661 memcpy(&rss_conf->rss_key[i], &key, 1);
2668 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2670 struct rte_eth_conf *dev_conf;
2671 struct rte_eth_rss_conf rss_conf;
2672 struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2673 uint16_t rx_queues = dev->data->nb_rx_queues;
2677 PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
2680 nfp_reta_conf[0].mask = ~0x0;
2681 nfp_reta_conf[1].mask = ~0x0;
2684 for (i = 0; i < 0x40; i += 8) {
2685 for (j = i; j < (i + 8); j++) {
2686 nfp_reta_conf[0].reta[j] = queue;
2687 nfp_reta_conf[1].reta[j] = queue++;
2691 ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
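/*
 * The 0x80 (128) passed above matches NFP_NET_CFG_RSS_ITBL_SZ, i.e. the full
 * redirection table is (re)written, so the size check inside
 * nfp_net_rss_reta_write() is satisfied. Both 64-entry reta_conf groups have
 * their masks fully set so every entry is treated as valid.
 */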
2695 dev_conf = &dev->data->dev_conf;
2697 PMD_DRV_LOG(INFO, "wrong rss conf");
2700 rss_conf = dev_conf->rx_adv_conf.rss_conf;
2702 ret = nfp_net_rss_hash_write(dev, &rss_conf);
2708 /* Initialize and register the driver with the DPDK application */
2709 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
2710 .dev_configure = nfp_net_configure,
2711 .dev_start = nfp_net_start,
2712 .dev_stop = nfp_net_stop,
2713 .dev_set_link_up = nfp_net_set_link_up,
2714 .dev_set_link_down = nfp_net_set_link_down,
2715 .dev_close = nfp_net_close,
2716 .promiscuous_enable = nfp_net_promisc_enable,
2717 .promiscuous_disable = nfp_net_promisc_disable,
2718 .link_update = nfp_net_link_update,
2719 .stats_get = nfp_net_stats_get,
2720 .stats_reset = nfp_net_stats_reset,
2721 .dev_infos_get = nfp_net_infos_get,
2722 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
2723 .mtu_set = nfp_net_dev_mtu_set,
2724 .mac_addr_set = nfp_set_mac_addr,
2725 .vlan_offload_set = nfp_net_vlan_offload_set,
2726 .reta_update = nfp_net_reta_update,
2727 .reta_query = nfp_net_reta_query,
2728 .rss_hash_update = nfp_net_rss_hash_update,
2729 .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
2730 .rx_queue_setup = nfp_net_rx_queue_setup,
2731 .rx_queue_release = nfp_net_rx_queue_release,
2732 .rx_queue_count = nfp_net_rx_queue_count,
2733 .tx_queue_setup = nfp_net_tx_queue_setup,
2734 .tx_queue_release = nfp_net_tx_queue_release,
2735 .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
2736 .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
2740 * Every eth_dev created gets its own private data, but before nfp_net_init that
2741 * private data references the private data for all the PF ports. This is due
2742 * to how the vNIC BARs are mapped based on the first port, so all ports need info
2743 * about port 0 private data. Inside nfp_net_init the private data pointer is
2744 * changed to the right address for each port once the BARs have been mapped.
2746 * This function helps to find out which port, and therefore which offset
2747 * inside the private data array, to use.
2750 get_pf_port_number(char *name)
2752 char *pf_str = name;
2755 while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
2760 * This should not happen at all; it would indicate a major
2761 * implementation fault.
2763 rte_panic("nfp_net: problem with pf device name\n");
2765 /* Expecting _portX with X within [0,7] */
2768 return (int)strtol(pf_str, NULL, 10);
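/*
 * Example (hypothetical name): for an ethdev called "0000:03:00.0_port2"
 * the function locates the "_port" suffix and strtol() yields 2. That suffix
 * is the "%s_port%d" one generated in nfp_pf_create_dev() when the PF port
 * devices are allocated.
 */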
2772 nfp_net_init(struct rte_eth_dev *eth_dev)
2774 struct rte_pci_device *pci_dev;
2775 struct nfp_net_hw *hw, *hwport0;
2777 uint64_t tx_bar_off = 0, rx_bar_off = 0;
2783 PMD_INIT_FUNC_TRACE();
2785 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2787 /* NFP can not handle DMA addresses requiring more than 40 bits */
2788 if (rte_mem_check_dma_mask(40)) {
2789 RTE_LOG(ERR, PMD, "device %s can not be used:",
2790 pci_dev->device.name);
2791 RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
2795 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
2796 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
2797 port = get_pf_port_number(eth_dev->data->name);
2798 if (port < 0 || port > 7) {
2799 PMD_DRV_LOG(ERR, "Port value is wrong");
2803 PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port);
2805 /* This points to port 0 private data */
2806 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2808 /* This points to the specific port private data */
2809 hw = &hwport0[port];
2811 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2815 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
2816 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
2817 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
2819 /* For secondary processes, the primary has done all the work */
2820 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2823 rte_eth_copy_pci_info(eth_dev, pci_dev);
2825 hw->device_id = pci_dev->id.device_id;
2826 hw->vendor_id = pci_dev->id.vendor_id;
2827 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2828 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2830 PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
2831 pci_dev->id.vendor_id, pci_dev->id.device_id,
2832 pci_dev->addr.domain, pci_dev->addr.bus,
2833 pci_dev->addr.devid, pci_dev->addr.function);
2835 hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
2836 if (hw->ctrl_bar == NULL) {
2838 "hw->ctrl_bar is NULL. BAR0 not configured");
2842 if (hw->is_pf && port == 0) {
2843 hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0",
2844 hw->total_ports * 32768,
2846 if (!hw->ctrl_bar) {
2847 printf("nfp_rtsym_map fails for _pf0_net_ctrl_bar");
2851 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
2855 if (!hwport0->ctrl_bar)
2858 /* address based on port0 offset */
2859 hw->ctrl_bar = hwport0->ctrl_bar +
2860 (port * NFP_PF_CSR_SLICE_SIZE);
2863 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
2865 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
2866 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
2868 /* Work out where in the BAR the queues start. */
2869 switch (pci_dev->id.device_id) {
2870 case PCI_DEVICE_ID_NFP4000_PF_NIC:
2871 case PCI_DEVICE_ID_NFP6000_PF_NIC:
2872 case PCI_DEVICE_ID_NFP6000_VF_NIC:
2873 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
2874 tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
2875 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
2876 rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
2879 PMD_DRV_LOG(ERR, "nfp_net: no matching device ID");
2881 goto dev_err_ctrl_map;
2884 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
2885 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
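/*
 * The offsets logged above are derived from the config BAR: the firmware
 * exports the index of this vNIC's first TX/RX queue, and each queue
 * occupies NFP_QCP_QUEUE_ADDR_SZ bytes in the queue controller area, so
 * start_q * NFP_QCP_QUEUE_ADDR_SZ is the byte offset of the first queue.
 */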
2887 if (hw->is_pf && port == 0) {
2888 /* configure access to tx/rx vNIC BARs */
2889 hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
2891 NFP_QCP_QUEUE_AREA_SZ,
2892 &hw->hwqueues_area);
2894 if (!hwport0->hw_queues) {
2895 printf("nfp_rtsym_map fails for net.qc");
2897 goto dev_err_ctrl_map;
2900 PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p",
2901 hwport0->hw_queues);
2905 hw->tx_bar = hwport0->hw_queues + tx_bar_off;
2906 hw->rx_bar = hwport0->hw_queues + rx_bar_off;
2907 eth_dev->data->dev_private = hw;
2909 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2911 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2915 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
2916 hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
2918 nfp_net_cfg_queue_setup(hw);
2920 /* Get some of the read-only fields from the config BAR */
2921 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2922 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
2923 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
2924 hw->mtu = RTE_ETHER_MTU;
2926 /* VLAN insertion is incompatible with LSOv2 */
2927 if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
2928 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
2930 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
2931 hw->rx_offset = NFP_NET_RX_OFFSET;
2933 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
2935 PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
2936 NFD_CFG_MAJOR_VERSION_of(hw->ver),
2937 NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
2939 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
2940 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2941 hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
2942 hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
2943 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
2944 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
2945 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
2946 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
2947 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2948 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
2949 hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
2950 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
2951 hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
2952 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
2953 hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");
2957 hw->stride_rx = stride;
2958 hw->stride_tx = stride;
2960 PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
2961 hw->max_rx_queues, hw->max_tx_queues);
2963 /* Initializing spinlock for reconfigs */
2964 rte_spinlock_init(&hw->reconfig_lock);
2966 /* Allocating memory for mac addr */
2967 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
2968 RTE_ETHER_ADDR_LEN, 0);
2969 if (eth_dev->data->mac_addrs == NULL) {
2970 PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
2972 goto dev_err_queues_map;
2976 nfp_net_pf_read_mac(hwport0, port);
2977 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2979 nfp_net_vf_read_mac(hw);
2982 if (!rte_is_valid_assigned_ether_addr(
2983 (struct rte_ether_addr *)&hw->mac_addr)) {
2984 PMD_INIT_LOG(INFO, "Using random mac address for port %d",
2986 /* Using random mac addresses for VFs */
2987 rte_eth_random_addr(&hw->mac_addr[0]);
2988 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2991 /* Copying mac address to DPDK eth_dev struct */
2992 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
2993 ð_dev->data->mac_addrs[0]);
2995 if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
2996 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
2998 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
2999 "mac=%02x:%02x:%02x:%02x:%02x:%02x",
3000 eth_dev->data->port_id, pci_dev->id.vendor_id,
3001 pci_dev->id.device_id,
3002 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
3003 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
3005 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3006 /* Registering LSC interrupt handler */
3007 rte_intr_callback_register(&pci_dev->intr_handle,
3008 nfp_net_dev_interrupt_handler,
3010 /* Telling the firmware about the LSC interrupt entry */
3011 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
3012 /* Recording current stats counters values */
3013 nfp_net_stats_reset(eth_dev);
3019 nfp_cpp_area_free(hw->hwqueues_area);
3021 nfp_cpp_area_free(hw->ctrl_area);
3026 #define NFP_CPP_MEMIO_BOUNDARY (1 << 20)
3029 * Serving a write request to NFP from host programs. The request
3030 * sends the write size and the CPP target. The bridge makes use
3031 * of the CPP interface handler configured by the PMD setup.
3034 nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
3036 struct nfp_cpp_area *area;
3037 off_t offset, nfp_offset;
3038 uint32_t cpp_id, pos, len;
3039 uint32_t tmpbuf[16];
3040 size_t count, curlen, totlen = 0;
3043 PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
3044 sizeof(off_t), sizeof(size_t));
3046 /* Reading the count param */
3047 err = recv(sockfd, &count, sizeof(off_t), 0);
3048 if (err != sizeof(off_t))
3053 /* Reading the offset param */
3054 err = recv(sockfd, &offset, sizeof(off_t), 0);
3055 if (err != sizeof(off_t))
3058 /* Obtain target's CPP ID and offset in target */
3059 cpp_id = (offset >> 40) << 8;
3060 nfp_offset = offset & ((1ull << 40) - 1);
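/*
 * The 64-bit offset received from the client encodes two things: bits [39:0]
 * are the offset inside the CPP target, and the bits above bit 40 identify
 * the target itself; the "<< 8" above shifts that identification into the
 * layout nfp_cpp_area_alloc_with_name() expects for a cpp_id.
 */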
3062 PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
3064 PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
3065 cpp_id, nfp_offset);
3067 /* Adjust length if not aligned */
3068 if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
3069 (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
3070 curlen = NFP_CPP_MEMIO_BOUNDARY -
3071 (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
3075 /* configure a CPP PCIe2CPP BAR for mapping the CPP target */
3076 area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
3077 nfp_offset, curlen);
3079 RTE_LOG(ERR, PMD, "%s: area alloc fail\n", __func__);
3083 /* mapping the target */
3084 err = nfp_cpp_area_acquire(area);
3086 RTE_LOG(ERR, PMD, "area acquire failed\n");
3087 nfp_cpp_area_free(area);
3091 for (pos = 0; pos < curlen; pos += len) {
3093 if (len > sizeof(tmpbuf))
3094 len = sizeof(tmpbuf);
3096 PMD_CPP_LOG(DEBUG, "%s: Receive %u of %lu\n", __func__,
3098 err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
3099 if (err != (int)len) {
3101 "%s: error when receiving, %d of %lu\n",
3102 __func__, err, count);
3103 nfp_cpp_area_release(area);
3104 nfp_cpp_area_free(area);
3107 err = nfp_cpp_area_write(area, pos, tmpbuf, len);
3109 RTE_LOG(ERR, PMD, "nfp_cpp_area_write error\n");
3110 nfp_cpp_area_release(area);
3111 nfp_cpp_area_free(area);
3118 nfp_cpp_area_release(area);
3119 nfp_cpp_area_free(area);
3122 curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
3123 NFP_CPP_MEMIO_BOUNDARY : count;
3130 * Serving a read request to NFP from host programs. The request
3131 * sends the read size and the CPP target. The bridge makes use
3132 * of the CPP interface handler configured by the PMD setup. The read
3133 * data is sent to the requester using the same socket.
3136 nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
3138 struct nfp_cpp_area *area;
3139 off_t offset, nfp_offset;
3140 uint32_t cpp_id, pos, len;
3141 uint32_t tmpbuf[16];
3142 size_t count, curlen, totlen = 0;
3145 PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
3146 sizeof(off_t), sizeof(size_t));
3148 /* Reading the count param */
3149 err = recv(sockfd, &count, sizeof(off_t), 0);
3150 if (err != sizeof(off_t))
3155 /* Reading the offset param */
3156 err = recv(sockfd, &offset, sizeof(off_t), 0);
3157 if (err != sizeof(off_t))
3160 /* Obtain target's CPP ID and offset in target */
3161 cpp_id = (offset >> 40) << 8;
3162 nfp_offset = offset & ((1ull << 40) - 1);
3164 PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
3166 PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
3167 cpp_id, nfp_offset);
3169 /* Adjust length if not aligned */
3170 if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
3171 (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
3172 curlen = NFP_CPP_MEMIO_BOUNDARY -
3173 (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
3177 area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
3178 nfp_offset, curlen);
3180 RTE_LOG(ERR, PMD, "%s: area alloc failed\n", __func__);
3184 err = nfp_cpp_area_acquire(area);
3186 RTE_LOG(ERR, PMD, "area acquire failed\n");
3187 nfp_cpp_area_free(area);
3191 for (pos = 0; pos < curlen; pos += len) {
3193 if (len > sizeof(tmpbuf))
3194 len = sizeof(tmpbuf);
3196 err = nfp_cpp_area_read(area, pos, tmpbuf, len);
3198 RTE_LOG(ERR, PMD, "nfp_cpp_area_read error\n");
3199 nfp_cpp_area_release(area);
3200 nfp_cpp_area_free(area);
3203 PMD_CPP_LOG(DEBUG, "%s: sending %u of %lu\n", __func__,
3206 err = send(sockfd, tmpbuf, len, 0);
3207 if (err != (int)len) {
3209 "%s: error when sending: %d of %lu\n",
3210 __func__, err, count);
3211 nfp_cpp_area_release(area);
3212 nfp_cpp_area_free(area);
3219 nfp_cpp_area_release(area);
3220 nfp_cpp_area_free(area);
3223 curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
3224 NFP_CPP_MEMIO_BOUNDARY : count;
3229 #define NFP_IOCTL 'n'
3230 #define NFP_IOCTL_CPP_IDENTIFICATION _IOW(NFP_IOCTL, 0x8f, uint32_t)
3232 * Serving an ioctl command from host NFP tools. This usually goes to
3233 * a kernel char device driver, but that is not available when the PF is
3234 * bound to the PMD. Currently just one ioctl command is served and it
3235 * does not require any CPP access at all.
3238 nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp)
3240 uint32_t cmd, ident_size, tmp;
3243 /* Reading now the IOCTL command */
3244 err = recv(sockfd, &cmd, 4, 0);
3246 RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
3250 /* Only supporting NFP_IOCTL_CPP_IDENTIFICATION */
3251 if (cmd != NFP_IOCTL_CPP_IDENTIFICATION) {
3252 RTE_LOG(ERR, PMD, "%s: unknown cmd %d\n", __func__, cmd);
3256 err = recv(sockfd, &ident_size, 4, 0);
3258 RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
3262 tmp = nfp_cpp_model(cpp);
3264 PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x\n", __func__, tmp);
3266 err = send(sockfd, &tmp, 4, 0);
3268 RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
3272 tmp = cpp->interface;
3274 PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x\n", __func__, tmp);
3276 err = send(sockfd, &tmp, 4, 0);
3278 RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
3285 #define NFP_BRIDGE_OP_READ 20
3286 #define NFP_BRIDGE_OP_WRITE 30
3287 #define NFP_BRIDGE_OP_IOCTL 40
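/*
 * Wire protocol of the bridge (as implemented by the serve functions above):
 * the client connects to the unix socket, sends a 4-byte opcode
 * (NFP_BRIDGE_OP_READ/WRITE/IOCTL) and then the op-specific parameters: an
 * off_t sized count followed by an off_t sized offset for read/write, or the
 * ioctl command and argument size for ioctl.
 */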
3290 * This is the code to be executed by a service core. The CPP bridge interface
3291 * is based on a unix socket, and the requests usually received by a kernel char
3292 * driver (read, write and ioctl) are handled by the CPP bridge instead. NFP host
3293 * tools can be executed with a wrapper library via LD_LIBRARY_PATH, completely
3294 * unaware that the CPP bridge is standing in for the NFP kernel char driver for CPP
3298 nfp_cpp_bridge_service_func(void *args)
3300 struct sockaddr address;
3301 struct nfp_cpp *cpp = args;
3302 int sockfd, datafd, op, ret;
3304 unlink("/tmp/nfp_cpp");
3305 sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
3307 RTE_LOG(ERR, PMD, "%s: socket creation error. Service failed\n",
3312 memset(&address, 0, sizeof(struct sockaddr));
3314 address.sa_family = AF_UNIX;
3315 strcpy(address.sa_data, "/tmp/nfp_cpp");
3317 ret = bind(sockfd, (const struct sockaddr *)&address,
3318 sizeof(struct sockaddr));
3320 RTE_LOG(ERR, PMD, "%s: bind error (%d). Service failed\n",
3326 ret = listen(sockfd, 20);
3328 RTE_LOG(ERR, PMD, "%s: listen error(%d). Service failed\n",
3335 datafd = accept(sockfd, NULL, NULL);
3337 RTE_LOG(ERR, PMD, "%s: accept call error (%d)\n",
3339 RTE_LOG(ERR, PMD, "%s: service failed\n", __func__);
3345 ret = recv(datafd, &op, 4, 0);
3347 PMD_CPP_LOG(DEBUG, "%s: socket close\n",
3352 PMD_CPP_LOG(DEBUG, "%s: getting op %u\n", __func__, op);
3354 if (op == NFP_BRIDGE_OP_READ)
3355 nfp_cpp_bridge_serve_read(datafd, cpp);
3357 if (op == NFP_BRIDGE_OP_WRITE)
3358 nfp_cpp_bridge_serve_write(datafd, cpp);
3360 if (op == NFP_BRIDGE_OP_IOCTL)
3361 nfp_cpp_bridge_serve_ioctl(datafd, cpp);
3374 nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
3375 struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo,
3376 int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv)
3378 struct rte_eth_dev *eth_dev;
3379 struct nfp_net_hw *hw = NULL;
3381 struct rte_service_spec service;
3384 port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
3389 snprintf(port_name, 100, "%s_port%d", dev->device.name, port);
3391 strlcat(port_name, dev->device.name, 100);
3394 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3395 eth_dev = rte_eth_dev_allocate(port_name);
3397 rte_free(port_name);
3401 *priv = rte_zmalloc(port_name,
3402 sizeof(struct nfp_net_adapter) *
3403 ports, RTE_CACHE_LINE_SIZE);
3405 rte_free(port_name);
3406 rte_eth_dev_release_port(eth_dev);
3410 eth_dev->data->dev_private = *priv;
3413 * dev_private points to port0 dev_private because we need
3414 * to configure the vNIC BARs based on port0 at nfp_net_init.
3415 * Then dev_private is adjusted per port.
3417 hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
3419 hw->hwinfo = hwinfo;
3420 hw->sym_tbl = sym_tbl;
3421 hw->pf_port_idx = phys_port;
3424 hw->pf_multiport_enabled = 1;
3426 hw->total_ports = ports;
3428 eth_dev = rte_eth_dev_attach_secondary(port_name);
3430 RTE_LOG(ERR, EAL, "secondary process attach failed, "
3431 "ethdev doesn't exist");
3432 rte_free(port_name);
3435 eth_dev->process_private = cpp;
3438 eth_dev->device = &dev->device;
3439 rte_eth_copy_pci_info(eth_dev, dev);
3441 retval = nfp_net_init(eth_dev);
3447 rte_eth_dev_probing_finish(eth_dev);
3450 rte_free(port_name);
3454 * The rte_service needs to be created just once per PMD, and the
3455 * cpp handler needs to be linked to that service.
3456 * Secondary processes will be used for debugging DPDK apps that
3457 * need the CPP interface for accessing NFP
3458 * components, and the cpp handler for secondary processes is
3459 * available at this point.
3461 memset(&service, 0, sizeof(struct rte_service_spec));
3462 snprintf(service.name, sizeof(service.name), "nfp_cpp_service");
3463 service.callback = nfp_cpp_bridge_service_func;
3464 service.callback_userdata = (void *)cpp;
3466 hw = (struct nfp_net_hw *)(eth_dev->data->dev_private);
3468 if (rte_service_component_register(&service,
3469 &hw->nfp_cpp_service_id))
3470 RTE_LOG(ERR, PMD, "NFP CPP bridge service register() failed");
3472 RTE_LOG(DEBUG, PMD, "NFP CPP bridge service registered");
3478 rte_free(port_name);
3479 /* free ports private data if primary process */
3480 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3481 rte_free(eth_dev->data->dev_private);
3483 rte_eth_dev_release_port(eth_dev);
3488 #define DEFAULT_FW_PATH "/lib/firmware/netronome"
3491 nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
3493 struct nfp_cpp *cpp = nsp->cpp;
3498 struct stat file_stat;
3501 /* Looking for firmware file in order of priority */
3503 /* First try to find a firmware image specific for this device */
3504 snprintf(serial, sizeof(serial),
3505 "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
3506 cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
3507 cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
3508 cpp->interface & 0xff);
3510 snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
3513 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3514 fw_f = open(fw_name, O_RDONLY);
3518 /* Then try the PCI name */
3519 snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
3522 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3523 fw_f = open(fw_name, O_RDONLY);
3527 /* Finally try the card type and media */
3528 snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
3529 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3530 fw_f = open(fw_name, O_RDONLY);
3532 PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
3537 if (fstat(fw_f, &file_stat) < 0) {
3538 PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name);
3543 fsize = file_stat.st_size;
3544 PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "",
3545 fw_name, (uint64_t)fsize);
3547 fw_buf = malloc((size_t)fsize);
3549 PMD_DRV_LOG(INFO, "malloc failed for fw buffer");
3553 memset(fw_buf, 0, fsize);
3555 bytes = read(fw_f, fw_buf, fsize);
3556 if (bytes != fsize) {
3557 PMD_DRV_LOG(INFO, "Reading fw to buffer failed. "
3558 "Just %" PRIu64 " of %" PRIu64 " bytes read",
3559 (uint64_t)bytes, (uint64_t)fsize);
3565 PMD_DRV_LOG(INFO, "Uploading the firmware ...");
3566 nfp_nsp_load_fw(nsp, fw_buf, bytes);
3567 PMD_DRV_LOG(INFO, "Done");
3576 nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
3577 struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
3579 struct nfp_nsp *nsp;
3580 const char *nfp_fw_model;
3581 char card_desc[100];
3584 nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
3587 PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
3589 PMD_DRV_LOG(ERR, "firmware model NOT found");
3593 if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
3594 PMD_DRV_LOG(ERR, "NFP ethernet table reports a wrong number of ports: %u",
3595 nfp_eth_table->count);
3599 PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
3600 nfp_eth_table->count);
3602 PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
3604 snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
3605 nfp_fw_model, nfp_eth_table->count,
3606 nfp_eth_table->ports[0].speed / 1000);
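/*
 * Illustrative result of the snprintf above (model name made up): an
 * "AMDA0099-0001" assembly with 2 ports at 25000 Mb/s yields
 * "nic_AMDA0099-0001_2x25.nffw". This is the third, most generic filename
 * tried by nfp_fw_upload() after the per-serial and per-PCI-address ones.
 */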
3608 nsp = nfp_nsp_open(cpp);
3610 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
3614 nfp_nsp_device_soft_reset(nsp);
3615 err = nfp_fw_upload(dev, nsp, card_desc);
3621 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3622 struct rte_pci_device *dev)
3624 struct nfp_cpp *cpp;
3625 struct nfp_hwinfo *hwinfo;
3626 struct nfp_rtsym_table *sym_tbl;
3627 struct nfp_eth_table *nfp_eth_table = NULL;
3638 * When the device is bound to UIO it could be used, by mistake,
3639 * by two DPDK apps, and the UIO driver does not prevent it. This
3640 * could lead to a serious problem when configuring the NFP CPP
3641 * interface. Here we avoid this by telling the CPP init code to
3642 * use a lock file if UIO is being used.
3644 if (dev->kdrv == RTE_KDRV_VFIO)
3645 cpp = nfp_cpp_from_device_name(dev, 0);
3647 cpp = nfp_cpp_from_device_name(dev, 1);
3650 PMD_DRV_LOG(ERR, "A CPP handle can not be obtained");
3655 hwinfo = nfp_hwinfo_read(cpp);
3657 PMD_DRV_LOG(ERR, "Error reading hwinfo table");
3661 nfp_eth_table = nfp_eth_read_ports(cpp);
3662 if (!nfp_eth_table) {
3663 PMD_DRV_LOG(ERR, "Error reading NFP ethernet table");
3667 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3668 if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) {
3669 PMD_DRV_LOG(INFO, "Error when uploading firmware");
3675 /* Now the symbol table should be there */
3676 sym_tbl = nfp_rtsym_table_read(cpp);
3678 PMD_DRV_LOG(ERR, "Something is wrong with the firmware"
3684 total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
3685 if (total_ports != (int)nfp_eth_table->count) {
3686 PMD_DRV_LOG(ERR, "Inconsistent number of ports");
3690 PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports);
3692 if (total_ports <= 0 || total_ports > 8) {
3693 PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
3698 for (i = 0; i < total_ports; i++) {
3699 ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo,
3700 nfp_eth_table->ports[i].index,
3707 free(nfp_eth_table);
3711 int nfp_logtype_init;
3712 int nfp_logtype_driver;
3714 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
3716 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3717 PCI_DEVICE_ID_NFP4000_PF_NIC)
3720 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3721 PCI_DEVICE_ID_NFP6000_PF_NIC)
3728 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
3730 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3731 PCI_DEVICE_ID_NFP6000_VF_NIC)
3738 static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3739 struct rte_pci_device *pci_dev)
3741 return rte_eth_dev_pci_generic_probe(pci_dev,
3742 sizeof(struct nfp_net_adapter), nfp_net_init);
3745 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
3747 struct rte_eth_dev *eth_dev;
3748 struct nfp_net_hw *hw, *hwport0;
3751 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
3752 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
3753 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
3754 port = get_pf_port_number(eth_dev->data->name);
3756 * hotplug is not possible with multiport PF although freeing
3757 * data structures can be done for the first port.
3761 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3762 hw = &hwport0[port];
3763 nfp_cpp_area_free(hw->ctrl_area);
3764 nfp_cpp_area_free(hw->hwqueues_area);
3767 nfp_cpp_free(hw->cpp);
3769 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3771 /* hotplug is not possible with multiport PF */
3772 if (hw->pf_multiport_enabled)
3774 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
3777 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
3778 .id_table = pci_id_nfp_pf_net_map,
3779 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3780 .probe = nfp_pf_pci_probe,
3781 .remove = eth_nfp_pci_remove,
3784 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
3785 .id_table = pci_id_nfp_vf_net_map,
3786 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3787 .probe = eth_nfp_pci_probe,
3788 .remove = eth_nfp_pci_remove,
3791 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
3792 RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
3793 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
3794 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
3795 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
3796 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
3798 RTE_INIT(nfp_init_log)
3800 nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
3801 if (nfp_logtype_init >= 0)
3802 rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
3803 nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
3804 if (nfp_logtype_driver >= 0)
3805 rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
3809 * c-file-style: "Linux"
3810 * indent-tabs-mode: t