2 * Copyright (c) 2014, 2015 Netronome Systems, Inc.
5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
35 * vim:shiftwidth=8:noexpandtab
37 * @file dpdk/pmd/nfp_net.c
39 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
48 #include <sys/socket.h>
55 #include <rte_byteorder.h>
56 #include <rte_common.h>
58 #include <rte_debug.h>
59 #include <rte_ethdev.h>
61 #include <rte_ether.h>
62 #include <rte_malloc.h>
63 #include <rte_memzone.h>
64 #include <rte_mempool.h>
65 #include <rte_version.h>
66 #include <rte_string_fns.h>
67 #include <rte_alarm.h>
69 #include "nfp_net_pmd.h"
70 #include "nfp_net_logs.h"
71 #include "nfp_net_ctrl.h"
74 static void nfp_net_close(struct rte_eth_dev *dev);
75 static int nfp_net_configure(struct rte_eth_dev *dev);
76 static int nfp_net_init(struct rte_eth_dev *eth_dev);
77 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
78 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
80 static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
82 static void nfp_net_rx_queue_release(void *rxq);
83 static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
84 uint16_t nb_desc, unsigned int socket_id,
85 const struct rte_eth_rxconf *rx_conf,
86 struct rte_mempool *mp);
87 static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
88 static void nfp_net_tx_queue_release(void *txq);
89 static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
90 uint16_t nb_desc, unsigned int socket_id,
91 const struct rte_eth_txconf *tx_conf);
92 static int nfp_net_start(struct rte_eth_dev *dev);
93 static void nfp_net_stats_get(struct rte_eth_dev *dev,
94 struct rte_eth_stats *stats);
95 static void nfp_net_stats_reset(struct rte_eth_dev *dev);
96 static void nfp_net_stop(struct rte_eth_dev *dev);
97 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
101 * The offset of the queue controller queues in the PCIe Target. These
102 * happen to be at the same offset on the NFP6000 and the NFP3200 so
103 * we use a single macro here.
105 #define NFP_PCIE_QUEUE(_q) (0x80000 + (0x800 * ((_q) & 0xff)))
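/*
 * For instance, queue 0 maps to 0x80000 and queue 1 to 0x80800, as each
 * queue controller occupies 0x800 bytes within the PCIe Target.
 */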
107 /* Maximum value which can be added to a queue with one transaction */
108 #define NFP_QCP_MAX_ADD 0x7f
110 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
111 (uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
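/*
 * Default DMA address of an mbuf: the physical address of the buffer plus
 * the standard headroom, i.e. where the packet data starts.
 */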
113 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
115 NFP_QCP_READ_PTR = 0,
120 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
121 * @q: Base address for queue structure
122 * @ptr: Add to the Read or Write pointer
123 * @val: Value to add to the queue pointer
125 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
128 nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
132 if (ptr == NFP_QCP_READ_PTR)
133 off = NFP_QCP_QUEUE_ADD_RPTR;
135 off = NFP_QCP_QUEUE_ADD_WPTR;
137 while (val > NFP_QCP_MAX_ADD) {
138 nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
139 val -= NFP_QCP_MAX_ADD;
142 nn_writel(rte_cpu_to_le_32(val), q + off);
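/*
 * For example, adding 200 to a queue pointer results in two writes: first
 * NFP_QCP_MAX_ADD (0x7f = 127) and then the remaining 73.
 */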
146 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
147 * @q: Base address for queue structure
148 * @ptr: Read or Write pointer
150 static inline uint32_t
151 nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
156 if (ptr == NFP_QCP_READ_PTR)
157 off = NFP_QCP_QUEUE_STS_LO;
159 off = NFP_QCP_QUEUE_STS_HI;
161 val = rte_cpu_to_le_32(nn_readl(q + off));
163 if (ptr == NFP_QCP_READ_PTR)
164 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
166 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
170 * Functions to read/write from/to Config BAR
171 * Performs any endian conversion necessary.
173 static inline uint8_t
174 nn_cfg_readb(struct nfp_net_hw *hw, int off)
176 return nn_readb(hw->ctrl_bar + off);
180 nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
182 nn_writeb(val, hw->ctrl_bar + off);
185 static inline uint32_t
186 nn_cfg_readl(struct nfp_net_hw *hw, int off)
188 return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
192 nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
194 nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
197 static inline uint64_t
198 nn_cfg_readq(struct nfp_net_hw *hw, int off)
200 return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
204 nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
206 nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
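/*
 * Illustrative usage, with the accessors taking care of the endian
 * conversion:
 *
 *	cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 *	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
 */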
209 /* Creating memzone for hardware rings. */
210 static const struct rte_memzone *
211 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
212 uint16_t queue_id, uint32_t ring_size, int socket_id)
214 char z_name[RTE_MEMZONE_NAMESIZE];
215 const struct rte_memzone *mz;
217 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
218 dev->driver->pci_drv.name,
219 ring_name, dev->data->port_id, queue_id);
221 mz = rte_memzone_lookup(z_name);
225 return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0,
230 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
234 if (rxq->rxbufs == NULL)
237 for (i = 0; i < rxq->rx_count; i++) {
238 if (rxq->rxbufs[i].mbuf) {
239 rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
240 rxq->rxbufs[i].mbuf = NULL;
246 nfp_net_rx_queue_release(void *rx_queue)
248 struct nfp_net_rxq *rxq = rx_queue;
251 nfp_net_rx_queue_release_mbufs(rxq);
252 rte_free(rxq->rxbufs);
258 nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
260 nfp_net_rx_queue_release_mbufs(rxq);
267 nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
271 if (txq->txbufs == NULL)
274 for (i = 0; i < txq->tx_count; i++) {
275 if (txq->txbufs[i].mbuf) {
276 rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
277 txq->txbufs[i].mbuf = NULL;
283 nfp_net_tx_queue_release(void *tx_queue)
285 struct nfp_net_txq *txq = tx_queue;
288 nfp_net_tx_queue_release_mbufs(txq);
289 rte_free(txq->txbufs);
295 nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
297 nfp_net_tx_queue_release_mbufs(txq);
304 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
308 struct timespec wait;
310 PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
313 if (hw->qcp_cfg == NULL)
314 rte_panic("Bad configuration queue pointer\n");
316 nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
319 wait.tv_nsec = 1000000;
321 PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");
323 /* Poll update field, waiting for NFP to ack the config */
324 for (cnt = 0; ; cnt++) {
325 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
328 if (new & NFP_NET_CFG_UPDATE_ERR) {
329 PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new);
332 if (cnt >= NFP_NET_POLL_TIMEOUT) {
333 PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
334 " %dms\n", update, cnt);
335 rte_panic("Exiting\n");
337 nanosleep(&wait, 0); /* wait 1 ms */
339 PMD_DRV_LOG(DEBUG, "Ack DONE\n");
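/*
 * To summarise the handshake: the caller writes the new ctrl/update words
 * to the config BAR, this function adds one to the config queue write
 * pointer to notify the firmware, and then the update word is polled until
 * the firmware clears it, flags NFP_NET_CFG_UPDATE_ERR, or the poll times
 * out.
 */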
344 * Reconfigure the NIC
345 * @nn: device to reconfigure
346 * @ctrl: The value for the ctrl field in the BAR config
347 * @update: The value for the update field in the BAR config
349 * Write the update word to the BAR and ping the reconfig queue. Then poll
350 * until the firmware has acknowledged the update by zeroing the update word.
353 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
357 PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
360 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
361 nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
365 err = __nfp_net_reconfig(hw, update);
371 * Reconfig errors imply situations where the caller can handle them.
372 * Otherwise, rte_panic is called inside __nfp_net_reconfig
374 PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n",
380 * Configure an Ethernet device. This function must be invoked first
381 * before any other function in the Ethernet API. This function can
382 * also be re-invoked when a device is in the stopped state.
385 nfp_net_configure(struct rte_eth_dev *dev)
387 struct rte_eth_conf *dev_conf;
388 struct rte_eth_rxmode *rxmode;
389 struct rte_eth_txmode *txmode;
390 uint32_t new_ctrl = 0;
392 struct nfp_net_hw *hw;
394 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
397 * A DPDK app sends info about how many queues to use and how
398 * those queues need to be configured. This is used by the
399 * DPDK core, which makes sure no more queues than those
400 * advertised by the driver are requested. This function is
401 * called after that internal process.
404 PMD_INIT_LOG(DEBUG, "Configure\n");
406 dev_conf = &dev->data->dev_conf;
407 rxmode = &dev_conf->rxmode;
408 txmode = &dev_conf->txmode;
410 /* Checking TX mode */
411 if (txmode->mq_mode) {
412 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n");
416 /* Checking RX mode */
417 if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
418 if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
419 update = NFP_NET_CFG_UPDATE_RSS;
420 new_ctrl = NFP_NET_CFG_CTRL_RSS;
422 PMD_INIT_LOG(INFO, "RSS not supported\n");
427 if (rxmode->split_hdr_size) {
428 PMD_INIT_LOG(INFO, "rxmode does not support split header\n");
432 if (rxmode->hw_ip_checksum) {
433 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
434 new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
436 PMD_INIT_LOG(INFO, "RXCSUM not supported\n");
441 if (rxmode->hw_vlan_filter) {
442 PMD_INIT_LOG(INFO, "VLAN filter not supported\n");
446 if (rxmode->hw_vlan_strip) {
447 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
448 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
450 PMD_INIT_LOG(INFO, "hw vlan strip not supported\n");
455 if (rxmode->hw_vlan_extend) {
456 PMD_INIT_LOG(INFO, "VLAN extended not supported\n");
460 /* Supporting VLAN insertion by default */
461 if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
462 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
464 if (rxmode->jumbo_frame)
465 /* this is handled in rte_eth_dev_configure */
467 if (rxmode->hw_strip_crc) {
468 PMD_INIT_LOG(INFO, "strip CRC not supported\n");
472 if (rxmode->enable_scatter) {
473 PMD_INIT_LOG(INFO, "Scatter not supported\n");
480 update |= NFP_NET_CFG_UPDATE_GEN;
482 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
483 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
492 nfp_net_enable_queues(struct rte_eth_dev *dev)
494 struct nfp_net_hw *hw;
495 uint64_t enabled_queues = 0;
498 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
500 /* Enabling the required TX queues in the device */
501 for (i = 0; i < dev->data->nb_tx_queues; i++)
502 enabled_queues |= (1 << i);
504 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
508 /* Enabling the required RX queues in the device */
509 for (i = 0; i < dev->data->nb_rx_queues; i++)
510 enabled_queues |= (1 << i);
512 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
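/*
 * For instance, with 4 RX/TX queues in use the mask written to the enable
 * registers is 0xf, i.e. queues 0-3 enabled.
 */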
516 nfp_net_disable_queues(struct rte_eth_dev *dev)
518 struct nfp_net_hw *hw;
519 uint32_t new_ctrl, update = 0;
521 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
523 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
524 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
526 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
527 update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
528 NFP_NET_CFG_UPDATE_MSIX;
530 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
531 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
533 /* If reconfig fails, avoid changing the cached hw state */
534 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
541 nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
545 for (i = 0; i < dev->data->nb_rx_queues; i++) {
546 if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
553 nfp_net_params_setup(struct nfp_net_hw *hw)
555 uint32_t *mac_address;
557 nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
558 nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
560 /* A MAC address is 6 bytes long and is written as two 32-bit words */
561 mac_address = (uint32_t *)(hw->mac_addr);
563 nn_cfg_writel(hw, NFP_NET_CFG_MACADDR,
564 rte_cpu_to_be_32(*mac_address));
565 nn_cfg_writel(hw, NFP_NET_CFG_MACADDR + 4,
566 rte_cpu_to_be_32(*(mac_address + 1)));
570 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
572 hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
576 nfp_net_start(struct rte_eth_dev *dev)
578 uint32_t new_ctrl, update = 0;
579 struct nfp_net_hw *hw;
582 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
584 PMD_INIT_LOG(DEBUG, "Start\n");
586 /* Disabling queues just in case... */
587 nfp_net_disable_queues(dev);
589 /* Writing configuration parameters in the device */
590 nfp_net_params_setup(hw);
592 /* Enabling the required queues in the device */
593 nfp_net_enable_queues(dev);
596 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_UPDATE_MSIX;
597 update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
599 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
600 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
602 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
603 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
607 * Allocating rte mbufs for the configured rx queues.
608 * This requires the queues to be enabled beforehand
610 if (nfp_net_rx_freelist_setup(dev) < 0) {
621 * An error returned by this function should make the app exit,
622 * with the system then releasing all the allocated memory,
623 * including memory coming from hugepages.
625 * The device could be enabled at this point with some queues
626 * ready for receiving packets. This is true if the call to
627 * nfp_net_rx_freelist_setup() succeeds for some queues but
628 * fails for subsequent queues.
630 * This should make the app exit, but it is better if we tell the
633 nfp_net_disable_queues(dev);
638 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
640 nfp_net_stop(struct rte_eth_dev *dev)
644 PMD_INIT_LOG(DEBUG, "Stop\n");
646 nfp_net_disable_queues(dev);
649 for (i = 0; i < dev->data->nb_tx_queues; i++) {
650 nfp_net_reset_tx_queue(
651 (struct nfp_net_txq *)dev->data->tx_queues[i]);
654 for (i = 0; i < dev->data->nb_rx_queues; i++) {
655 nfp_net_reset_rx_queue(
656 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
660 /* Reset and stop device. The device can not be restarted. */
662 nfp_net_close(struct rte_eth_dev *dev)
664 struct nfp_net_hw *hw;
666 PMD_INIT_LOG(DEBUG, "Close\n");
668 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
671 * We assume that the DPDK application is stopping all the
672 * threads/queues before calling the device close function.
677 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
680 * The ixgbe PMD driver disables the pcie master on the
681 * device. The i40e does not...
686 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
689 struct nfp_net_hw *hw;
690 struct rte_eth_stats nfp_dev_stats;
692 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
694 /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
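/*
 * Each ring has a pair of 64-bit counters in the stats area: packets at
 * NFP_NET_CFG_RXR_STATS(i)/TXR_STATS(i) and octets 8 bytes after them.
 */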
696 /* reading per RX ring stats */
697 for (i = 0; i < dev->data->nb_rx_queues; i++) {
698 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
701 nfp_dev_stats.q_ipackets[i] =
702 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
704 nfp_dev_stats.q_ipackets[i] -=
705 hw->eth_stats_base.q_ipackets[i];
707 nfp_dev_stats.q_ibytes[i] =
708 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
710 nfp_dev_stats.q_ibytes[i] -=
711 hw->eth_stats_base.q_ibytes[i];
714 /* reading per TX ring stats */
715 for (i = 0; i < dev->data->nb_tx_queues; i++) {
716 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
719 nfp_dev_stats.q_opackets[i] =
720 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
722 nfp_dev_stats.q_opackets[i] -=
723 hw->eth_stats_base.q_opackets[i];
725 nfp_dev_stats.q_obytes[i] =
726 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
728 nfp_dev_stats.q_obytes[i] -=
729 hw->eth_stats_base.q_obytes[i];
732 nfp_dev_stats.ipackets =
733 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
735 nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
737 nfp_dev_stats.ibytes =
738 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
740 nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
742 nfp_dev_stats.opackets =
743 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
745 nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
747 nfp_dev_stats.obytes =
748 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
750 nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
752 nfp_dev_stats.imcasts =
753 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
755 nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
757 /* reading general device stats */
758 nfp_dev_stats.ierrors =
759 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
761 nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
763 nfp_dev_stats.oerrors =
764 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
766 nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
768 /* Multicast frames received */
769 nfp_dev_stats.imcasts =
770 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
772 nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
774 /* RX ring mbuf allocation failures */
775 nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
777 nfp_dev_stats.imissed =
778 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
780 nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
783 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
787 nfp_net_stats_reset(struct rte_eth_dev *dev)
790 struct nfp_net_hw *hw;
792 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
795 * hw->eth_stats_base records the per counter starting point.
799 /* reading per RX ring stats */
800 for (i = 0; i < dev->data->nb_rx_queues; i++) {
801 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
804 hw->eth_stats_base.q_ipackets[i] =
805 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
807 hw->eth_stats_base.q_ibytes[i] =
808 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
811 /* reading per TX ring stats */
812 for (i = 0; i < dev->data->nb_tx_queues; i++) {
813 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
816 hw->eth_stats_base.q_opackets[i] =
817 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
819 hw->eth_stats_base.q_obytes[i] =
820 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
823 hw->eth_stats_base.ipackets =
824 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
826 hw->eth_stats_base.ibytes =
827 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
829 hw->eth_stats_base.opackets =
830 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
832 hw->eth_stats_base.obytes =
833 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
835 hw->eth_stats_base.imcasts =
836 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
838 /* reading general device stats */
839 hw->eth_stats_base.ierrors =
840 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
842 hw->eth_stats_base.oerrors =
843 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
845 /* Multicast frames received */
846 hw->eth_stats_base.imcasts =
847 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
849 /* RX ring mbuf allocation failures */
850 dev->data->rx_mbuf_alloc_failed = 0;
852 hw->eth_stats_base.imissed =
853 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
857 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
859 struct nfp_net_rxq *rxq;
860 struct nfp_net_rx_desc *rxds;
864 rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
867 PMD_INIT_LOG(ERR, "Bad queue: %u\n", queue_idx);
871 idx = rxq->rd_p % rxq->rx_count;
872 rxds = &rxq->rxds[idx];
877 * Other PMDs just check the DD bit in intervals of 4
878 * descriptors and count all four if the first one has the DD
879 * bit set. Of course, this is not accurate but can be good for
880 * performance. Ideally, that should be done on descriptor
881 * chunks belonging to the same cache line.
884 while (count < rxq->rx_count) {
885 rxds = &rxq->rxds[idx];
886 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
893 if ((idx) == rxq->rx_count)
901 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
902 uint16_t queue_idx, uint16_t nb_desc,
903 unsigned int socket_id,
904 const struct rte_eth_rxconf *rx_conf,
905 struct rte_mempool *mp)
907 const struct rte_memzone *tz;
908 struct nfp_net_rxq *rxq;
909 struct nfp_net_hw *hw;
911 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
913 PMD_INIT_FUNC_TRACE();
915 /* Validating number of descriptors */
916 if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
917 (nb_desc > NFP_NET_MAX_RX_DESC) ||
918 (nb_desc < NFP_NET_MIN_RX_DESC)) {
919 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
924 * Free memory prior to re-allocation if needed. This is the case after
925 * calling nfp_net_stop
927 if (dev->data->rx_queues[queue_idx]) {
928 nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
929 dev->data->rx_queues[queue_idx] = NULL;
932 /* Allocating rx queue data structure */
933 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
934 RTE_CACHE_LINE_SIZE, socket_id);
938 /* Hw queues mapping based on firmware configuration */
939 rxq->qidx = queue_idx;
940 rxq->fl_qcidx = queue_idx * hw->stride_rx;
941 rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
942 rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
943 rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
946 * Tracking mbuf size for detecting a potential mbuf overflow due to
950 rxq->mbuf_size = rxq->mem_pool->elt_size;
951 rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
952 hw->flbufsz = rxq->mbuf_size;
954 rxq->rx_count = nb_desc;
955 rxq->port_id = dev->data->port_id;
956 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
957 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
959 rxq->drop_en = rx_conf->rx_drop_en;
962 * Allocate RX ring hardware descriptors. A memzone large enough to
963 * handle the maximum ring size is allocated in order to allow for
964 * resizing in later calls to the queue setup function.
966 tz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
967 sizeof(struct nfp_net_rx_desc) *
968 NFP_NET_MAX_RX_DESC, socket_id);
971 RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
972 nfp_net_rx_queue_release(rxq);
976 /* Saving physical and virtual addresses for the RX ring */
977 rxq->dma = (uint64_t)tz->phys_addr;
978 rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
980 /* mbuf pointers array for referencing mbufs linked to RX descriptors */
981 rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
982 sizeof(*rxq->rxbufs) * nb_desc,
983 RTE_CACHE_LINE_SIZE, socket_id);
984 if (rxq->rxbufs == NULL) {
985 nfp_net_rx_queue_release(rxq);
989 PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
990 rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
992 nfp_net_reset_rx_queue(rxq);
994 dev->data->rx_queues[queue_idx] = rxq;
998 * Telling the HW about the physical address of the RX ring and number
999 * of descriptors in log2 format
1001 nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
1002 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc));
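/*
 * For example, a ring of 1024 descriptors is reported to the firmware as
 * log2(1024) = 10.
 */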
1008 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
1010 struct nfp_net_rx_buff *rxe = rxq->rxbufs;
1014 PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
1017 for (i = 0; i < rxq->rx_count; i++) {
1018 struct nfp_net_rx_desc *rxd;
1019 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
1022 RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
1023 (unsigned)rxq->qidx);
1027 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
1029 rxd = &rxq->rxds[i];
1031 rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
1032 rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
1034 PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
1039 /* Make sure all writes are flushed before telling the hardware */
1042 /* Not advertising the whole ring as the firmware gets confused if so */
1043 PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n",
1046 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
1052 nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1053 uint16_t nb_desc, unsigned int socket_id,
1054 const struct rte_eth_txconf *tx_conf)
1056 const struct rte_memzone *tz;
1057 struct nfp_net_txq *txq;
1058 uint16_t tx_free_thresh;
1059 struct nfp_net_hw *hw;
1061 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1063 PMD_INIT_FUNC_TRACE();
1065 /* Validating number of descriptors */
1066 if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
1067 (nb_desc > NFP_NET_MAX_TX_DESC) ||
1068 (nb_desc < NFP_NET_MIN_TX_DESC)) {
1069 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
1073 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1074 tx_conf->tx_free_thresh :
1075 DEFAULT_TX_FREE_THRESH);
1077 if (tx_free_thresh > (nb_desc)) {
1079 "tx_free_thresh must be less than the number of TX "
1080 "descriptors. (tx_free_thresh=%u port=%d "
1081 "queue=%d)\n", (unsigned int)tx_free_thresh,
1082 (int)dev->data->port_id, (int)queue_idx);
1087 * Free memory prior to re-allocation if needed. This is the case after
1088 * calling nfp_net_stop
1090 if (dev->data->tx_queues[queue_idx]) {
1091 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n",
1093 nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
1094 dev->data->tx_queues[queue_idx] = NULL;
1097 /* Allocating tx queue data structure */
1098 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
1099 RTE_CACHE_LINE_SIZE, socket_id);
1101 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
1106 * Allocate TX ring hardware descriptors. A memzone large enough to
1107 * handle the maximum ring size is allocated in order to allow for
1108 * resizing in later calls to the queue setup function.
1110 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1111 sizeof(struct nfp_net_tx_desc) *
1112 NFP_NET_MAX_TX_DESC, socket_id);
1114 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
1115 nfp_net_tx_queue_release(txq);
1119 txq->tx_count = nb_desc;
1121 txq->tx_free_thresh = tx_free_thresh;
1122 txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
1123 txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
1124 txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
1126 /* queue mapping based on firmware configuration */
1127 txq->qidx = queue_idx;
1128 txq->tx_qcidx = queue_idx * hw->stride_tx;
1129 txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
1131 txq->port_id = dev->data->port_id;
1132 txq->txq_flags = tx_conf->txq_flags;
1134 /* Saving physical and virtual addresses for the TX ring */
1135 txq->dma = (uint64_t)tz->phys_addr;
1136 txq->txds = (struct nfp_net_tx_desc *)tz->addr;
1138 /* mbuf pointers array for referencing mbufs linked to TX descriptors */
1139 txq->txbufs = rte_zmalloc_socket("txq->txbufs",
1140 sizeof(*txq->txbufs) * nb_desc,
1141 RTE_CACHE_LINE_SIZE, socket_id);
1142 if (txq->txbufs == NULL) {
1143 nfp_net_tx_queue_release(txq);
1146 PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
1147 txq->txbufs, txq->txds, (unsigned long int)txq->dma);
1149 nfp_net_reset_tx_queue(txq);
1151 dev->data->tx_queues[queue_idx] = txq;
1155 * Telling the HW about the physical address of the TX ring and number
1156 * of descriptors in log2 format
1158 nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
1159 nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc));
1164 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
1166 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1167 struct rte_mbuf *mb)
1170 struct nfp_net_hw *hw = txq->hw;
1172 if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
1175 ol_flags = mb->ol_flags;
1177 /* IPv6 does not need checksum */
1178 if (ol_flags & PKT_TX_IP_CKSUM)
1179 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
1181 switch (ol_flags & PKT_TX_L4_MASK) {
1182 case PKT_TX_UDP_CKSUM:
1183 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
1185 case PKT_TX_TCP_CKSUM:
1186 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
1190 txd->flags |= PCIE_DESC_TX_CSUM;
1193 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
1195 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1196 struct rte_mbuf *mb)
1198 struct nfp_net_hw *hw = rxq->hw;
1200 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
1203 /* If IPv4 and IP checksum error, fail */
1204 if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
1205 !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
1206 mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1208 /* If neither UDP nor TCP return */
1209 if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1210 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
1213 if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1214 !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
1215 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1217 if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
1218 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
1219 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1222 #define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
1223 #define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
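/*
 * Layout of the RSS prepend relative to the start of packet data: the
 * 32-bit hash type is at data_off - 8 and the 32-bit hash value at
 * data_off - 4, both in big-endian byte order.
 */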
1226 * nfp_net_set_hash - Set mbuf hash data
1228 * The RSS hash and hash-type are pre-pended to the packet data.
1229 * Extract and decode it and set the mbuf fields.
1232 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1233 struct rte_mbuf *mbuf)
1237 struct nfp_net_hw *hw = rxq->hw;
1239 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1242 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1245 hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
1246 hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
1249 * hash type is sharing the same word with input port info
1254 mbuf->hash.rss = hash;
1255 mbuf->ol_flags |= PKT_RX_RSS_HASH;
1257 switch (hash_type) {
1258 case NFP_NET_RSS_IPV4:
1259 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
1261 case NFP_NET_RSS_IPV6:
1262 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
1264 case NFP_NET_RSS_IPV6_EX:
1265 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
1268 mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
1272 /* nfp_net_check_port - Set mbuf in_port field */
1274 nfp_net_check_port(struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf)
1278 if (!(rxd->rxd.flags & PCIE_DESC_RX_INGRESS_PORT)) {
1283 port = rte_be_to_cpu_32(*(uint32_t *)((uint8_t *)mbuf->buf_addr +
1284 mbuf->data_off - 8));
1287 * hash type is sharing the same word with input port info
1291 port = (uint8_t)(port >> 8);
1296 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
1298 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1301 #define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
1306 * There are some decisions to take:
1307 * 1) How to check DD RX descriptors bit
1308 * 2) How and when to allocate new mbufs
1310 * Current implementation checks just one single DD bit each loop. As each
1311 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
1312 * a single cache line instead. Tests with this change have not shown any
1313 * performance improvement but it requires further investigation. For example,
1314 * depending on which descriptor comes next, fewer than 8 descriptors may share
1315 * the same cache line, so checking only those in the same cache line implies
1316 * extra work which could be counterproductive by itself. Indeed, the latest
1317 * firmware changes do just this: writing several descriptors with the DD bit
1318 * set in order to save PCIe bandwidth and DMA operations from the NFP.
1320 * Mbuf allocation is done when a new packet is received. Then the descriptor
1321 * is automatically linked with the new mbuf and the old one is given to the
1322 * user. The main drawback of this design is that mbuf allocation is heavier
1323 * than the bulk allocations DPDK allows with rte_mempool_get_bulk. From the
1324 * cache point of view, allocating the mbuf early on, as we do now, does not
1325 * seem to have any benefit at all. Again, tests with this change have not
1326 * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing,
1327 * so the implications of this type of allocation should be studied
1332 nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1334 struct nfp_net_rxq *rxq;
1335 struct nfp_net_rx_desc *rxds;
1336 struct nfp_net_rx_buff *rxb;
1337 struct nfp_net_hw *hw;
1338 struct rte_mbuf *mb;
1339 struct rte_mbuf *new_mb;
1346 if (unlikely(rxq == NULL)) {
1348 * DPDK just checks that the queue is lower than the max number
1349 * of queues enabled. But the queue still needs to be configured
1351 RTE_LOG(ERR, PMD, "RX Bad queue\n");
1359 while (avail < nb_pkts) {
1360 idx = rxq->rd_p % rxq->rx_count;
1362 rxb = &rxq->rxbufs[idx];
1363 if (unlikely(rxb == NULL)) {
1364 RTE_LOG(ERR, PMD, "rxb does not exist!\n");
1369 * Memory barrier to ensure that we won't do other
1370 * reads before the DD bit.
1374 rxds = &rxq->rxds[idx];
1375 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
1379 * We got a packet. Let's alloc a new mbuf for refilling the
1380 * free descriptor ring as soon as possible
1382 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
1383 if (unlikely(new_mb == NULL)) {
1384 RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
1385 "queue_id=%u\n", (unsigned)rxq->port_id,
1386 (unsigned)rxq->qidx);
1387 nfp_net_mbuf_alloc_failed(rxq);
1394 * Grab the mbuf and refill the descriptor with the
1395 * previously allocated mbuf
1400 PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n",
1401 rxds->rxd.data_len, rxq->mbuf_size);
1403 /* Size of this segment */
1404 mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
1405 /* Size of the whole packet. We just support 1 segment */
1406 mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
1408 if (unlikely((mb->data_len + hw->rx_offset) >
1411 * This should not happen and the user has the
1412 * responsibility of avoiding it. But we have
1413 * to give some info about the error
1416 "mbuf overflow likely due to the RX offset.\n"
1417 "\t\tYour mbuf size should have extra space for"
1418 " RX offset=%u bytes.\n"
1419 "\t\tCurrently you just have %u bytes available"
1420 " but the received packet is %u bytes long",
1422 rxq->mbuf_size - hw->rx_offset,
1427 /* Filling the received mbuf with packet info */
1429 mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
1431 mb->data_off = RTE_PKTMBUF_HEADROOM +
1432 NFP_DESC_META_LEN(rxds);
1434 /* No scatter mode supported */
1438 /* Checking the RSS flag */
1439 nfp_net_set_hash(rxq, rxds, mb);
1441 /* Checking the checksum flag */
1442 nfp_net_rx_cksum(rxq, rxds, mb);
1444 /* Checking the port flag */
1445 nfp_net_check_port(rxds, mb);
1447 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
1448 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
1449 mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
1450 mb->ol_flags |= PKT_RX_VLAN_PKT;
1453 /* Adding the mbuf to the mbuf array passed by the app */
1454 rx_pkts[avail++] = mb;
1456 /* Now resetting and updating the descriptor */
1459 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
1461 rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
1462 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
1470 PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
1471 (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold);
1473 nb_hold += rxq->nb_rx_hold;
1476 * FL descriptors need to be written before incrementing the
1477 * FL queue WR pointer
1480 if (nb_hold > rxq->rx_free_thresh) {
1481 PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
1482 (unsigned)rxq->port_id, (unsigned)rxq->qidx,
1483 (unsigned)nb_hold, (unsigned)avail);
1484 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
1487 rxq->nb_rx_hold = nb_hold;
1493 * nfp_net_tx_free_bufs - Check for descriptors with a complete
1495 * @txq: TX queue to work with
1496 * Returns number of descriptors freed
1499 nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
1504 PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
1505 " status\n", txq->qidx);
1507 /* Work out how many packets have been sent */
1508 qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
1510 if (qcp_rd_p == txq->qcp_rd_p) {
1511 PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
1512 "packets (%u, %u)\n", txq->qidx,
1513 qcp_rd_p, txq->qcp_rd_p);
1517 if (qcp_rd_p > txq->qcp_rd_p)
1518 todo = qcp_rd_p - txq->qcp_rd_p;
1520 todo = qcp_rd_p + txq->tx_count - txq->qcp_rd_p;
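/*
 * Example: with qcp_rd_p = 10, txq->qcp_rd_p = 1000 and tx_count = 1024,
 * the hardware pointer has wrapped, so todo = 10 + 1024 - 1000 = 34.
 */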
1522 PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->qcp_rd_p: %u, qcp->rd_p: %u\n",
1523 qcp_rd_p, txq->qcp_rd_p, txq->rd_p);
1528 txq->qcp_rd_p += todo;
1529 txq->qcp_rd_p %= txq->tx_count;
1535 /* Always leave free descriptors to avoid wrapping confusion */
1536 #define NFP_FREE_TX_DESC(t) (t->tx_count - (t->wr_p - t->rd_p) - 8)
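/*
 * For example, with tx_count = 1024 and 100 descriptors in flight
 * (wr_p - rd_p = 100), NFP_FREE_TX_DESC() reports 916 free descriptors.
 */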
1539 * nfp_net_txq_full - Check if the number of free TX descriptors
1540 * is below tx_free_thresh
1542 * @txq: TX queue to check
1544 * This function uses the host copy of the read/write pointers
1547 int nfp_net_txq_full(struct nfp_net_txq *txq)
1549 return NFP_FREE_TX_DESC(txq) < txq->tx_free_thresh;
1553 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1555 struct nfp_net_txq *txq;
1556 struct nfp_net_hw *hw;
1557 struct nfp_net_tx_desc *txds;
1558 struct rte_mbuf *pkt;
1560 int pkt_size, dma_size;
1561 uint16_t free_descs, issued_descs;
1562 struct rte_mbuf **lmbuf;
1567 txds = &txq->txds[txq->tail];
1569 PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
1570 txq->qidx, txq->tail, nb_pkts);
1572 if ((NFP_FREE_TX_DESC(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
1573 nfp_net_tx_free_bufs(txq);
1575 free_descs = (uint16_t)NFP_FREE_TX_DESC(txq);
1576 if (unlikely(free_descs == 0))
1583 PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n",
1584 txq->qidx, nb_pkts);
1585 /* Sending packets */
1586 while ((i < nb_pkts) && free_descs) {
1587 /* Grabbing the mbuf linked to the current descriptor */
1588 lmbuf = &txq->txbufs[txq->tail].mbuf;
1589 /* Warming the cache for releasing the mbuf later on */
1590 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
1592 pkt = *(tx_pkts + i);
1594 if (unlikely((pkt->nb_segs > 1) &&
1595 !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
1596 PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set\n");
1597 rte_panic("Multisegment packet unsupported\n");
1600 /* Checking if we have enough descriptors */
1601 if (unlikely(pkt->nb_segs > free_descs))
1605 * Checksum and VLAN flags are set just in the first descriptor
1606 * of a multisegment packet
1608 nfp_net_tx_cksum(txq, txds, pkt);
1610 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
1611 (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
1612 txds->flags |= PCIE_DESC_TX_VLAN;
1613 txds->vlan = pkt->vlan_tci;
1616 if (pkt->ol_flags & PKT_TX_TCP_SEG)
1617 rte_panic("TSO is not supported\n");
1620 * mbuf data_len is the data length of one segment and pkt_len is
1621 * the length of the whole packet. When the packet has just one
1622 * segment, data_len equals pkt_len
1624 pkt_size = pkt->pkt_len;
1627 /* Releasing mbuf which was prefetched above */
1629 rte_pktmbuf_free_seg(*lmbuf);
1631 dma_size = pkt->data_len;
1632 dma_addr = RTE_MBUF_DATA_DMA_ADDR(pkt);
1633 PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
1634 "%" PRIx64 "\n", dma_addr);
1636 /* Filling descriptors fields */
1637 txds->dma_len = dma_size;
1638 txds->data_len = pkt->pkt_len;
1639 txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
1640 txds->dma_addr_lo = (dma_addr & 0xffffffff);
1641 ASSERT(free_descs > 0);
1645 * Linking the mbuf with the descriptor so it can be released
1646 * the next time the descriptor is used
1652 if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/
1655 pkt_size -= dma_size;
1658 txds->offset_eop |= PCIE_DESC_TX_EOP;
1660 txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
1663 /* Referencing next free TX descriptor */
1664 txds = &txq->txds[txq->tail];
1671 /* Increment write pointers. Force memory write before we let HW know */
1673 nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
1678 /* Update the Redirection Table (RETA) for Receive Side Scaling of the Ethernet device */
1680 nfp_net_reta_update(struct rte_eth_dev *dev,
1681 struct rte_eth_rss_reta_entry64 *reta_conf,
1684 uint32_t reta, mask;
1688 struct nfp_net_hw *hw =
1689 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1691 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1694 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1695 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
1696 "(%d) doesn't match the number hardware can supported "
1697 "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1702 * Update Redirection Table. There are 128 8-bit entries which can be
1703 * managed as 32 32-bit entries
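 * Entry j within each group of 4 occupies bits [8*j+7:8*j] of its 32-bit
 * word; entries 0-3 share the first word, entries 4-7 the second, and so on.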
1705 for (i = 0; i < reta_size; i += 4) {
1706 /* Handling 4 RSS entries per loop */
1707 idx = i / RTE_RETA_GROUP_SIZE;
1708 shift = i % RTE_RETA_GROUP_SIZE;
1709 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1715 /* If all 4 entries were set, don't need to read the RETA register */
1717 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1719 for (j = 0; j < 4; j++) {
1720 if (!(mask & (0x1 << j)))
1723 /* Clearing the entry bits */
1724 reta &= ~(0xFF << (8 * j));
1725 reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1727 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + i, reta);
1730 update = NFP_NET_CFG_UPDATE_RSS;
1732 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1738 /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
1740 nfp_net_reta_query(struct rte_eth_dev *dev,
1741 struct rte_eth_rss_reta_entry64 *reta_conf,
1747 struct nfp_net_hw *hw;
1749 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1751 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1754 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1755 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
1756 "(%d) doesn't match the number hardware can supported "
1757 "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1762 * Reading Redirection Table. There are 128 8-bit entries which can be
1763 * managed as 32 32-bit entries
1765 for (i = 0; i < reta_size; i += 4) {
1766 /* Handling 4 RSS entries per loop */
1767 idx = i / RTE_RETA_GROUP_SIZE;
1768 shift = i % RTE_RETA_GROUP_SIZE;
1769 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1774 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1775 for (j = 0; j < 4; j++) {
1776 if (!(mask & (0x1 << j)))
1778 reta_conf[idx].reta[shift + j] =
1779 (uint8_t)((reta >> (8 * j)) & 0xFF);
1786 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1787 struct rte_eth_rss_conf *rss_conf)
1790 uint32_t cfg_rss_ctrl = 0;
1794 struct nfp_net_hw *hw;
1796 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1798 rss_hf = rss_conf->rss_hf;
1800 /* Checking if RSS is enabled */
1801 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
1802 if (rss_hf != 0) { /* Enable RSS? */
1803 RTE_LOG(ERR, PMD, "RSS unsupported\n");
1806 return 0; /* Nothing to do */
1809 if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1810 RTE_LOG(ERR, PMD, "hash key too long\n");
1814 if (rss_hf & ETH_RSS_IPV4)
1815 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
1816 NFP_NET_CFG_RSS_IPV4_TCP |
1817 NFP_NET_CFG_RSS_IPV4_UDP;
1819 if (rss_hf & ETH_RSS_IPV6)
1820 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
1821 NFP_NET_CFG_RSS_IPV6_TCP |
1822 NFP_NET_CFG_RSS_IPV6_UDP;
1824 /* configuring where to apply the RSS hash */
1825 nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1827 /* Writing the key byte by byte */
1828 for (i = 0; i < rss_conf->rss_key_len; i++) {
1829 memcpy(&key, &rss_conf->rss_key[i], 1);
1830 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1833 /* Writing the key size */
1834 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1836 update = NFP_NET_CFG_UPDATE_RSS;
1838 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1845 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
1846 struct rte_eth_rss_conf *rss_conf)
1849 uint32_t cfg_rss_ctrl;
1852 struct nfp_net_hw *hw;
1854 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1856 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1859 rss_hf = rss_conf->rss_hf;
1860 cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
1862 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
1863 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
1865 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
1866 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1868 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
1869 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1871 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
1872 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1874 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
1875 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1877 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
1878 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
1880 /* Reading the key size */
1881 rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
1883 /* Reading the key byte by byte */
1884 for (i = 0; i < rss_conf->rss_key_len; i++) {
1885 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
1886 memcpy(&rss_conf->rss_key[i], &key, 1);
1892 /* Initialise and register driver with DPDK Application */
1893 static struct eth_dev_ops nfp_net_eth_dev_ops = {
1894 .dev_configure = nfp_net_configure,
1895 .dev_start = nfp_net_start,
1896 .dev_stop = nfp_net_stop,
1897 .dev_close = nfp_net_close,
1898 .stats_get = nfp_net_stats_get,
1899 .stats_reset = nfp_net_stats_reset,
1900 .reta_update = nfp_net_reta_update,
1901 .reta_query = nfp_net_reta_query,
1902 .rss_hash_update = nfp_net_rss_hash_update,
1903 .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
1904 .rx_queue_setup = nfp_net_rx_queue_setup,
1905 .rx_queue_release = nfp_net_rx_queue_release,
1906 .rx_queue_count = nfp_net_rx_queue_count,
1907 .tx_queue_setup = nfp_net_tx_queue_setup,
1908 .tx_queue_release = nfp_net_tx_queue_release,
1912 nfp_net_init(struct rte_eth_dev *eth_dev)
1914 struct rte_pci_device *pci_dev;
1915 struct nfp_net_hw *hw;
1917 uint32_t tx_bar_off, rx_bar_off;
1921 PMD_INIT_FUNC_TRACE();
1923 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1925 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
1926 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
1927 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
1929 /* For secondary processes, the primary has done all the work */
1930 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1933 pci_dev = eth_dev->pci_dev;
1934 hw->device_id = pci_dev->id.device_id;
1935 hw->vendor_id = pci_dev->id.vendor_id;
1936 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1937 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1939 PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n",
1940 pci_dev->id.vendor_id, pci_dev->id.device_id,
1941 pci_dev->addr.domain, pci_dev->addr.bus,
1942 pci_dev->addr.devid, pci_dev->addr.function);
1944 hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
1945 if (hw->ctrl_bar == NULL) {
1947 "hw->ctrl_bar is NULL. BAR0 not configured\n");
1950 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
1951 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
1953 /* Work out where in the BAR the queues start. */
1954 switch (pci_dev->id.device_id) {
1955 case PCI_DEVICE_ID_NFP6000_VF_NIC:
1956 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
1957 tx_bar_off = NFP_PCIE_QUEUE(start_q);
1958 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
1959 rx_bar_off = NFP_PCIE_QUEUE(start_q);
1962 RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n");
1966 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off);
1967 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off);
1969 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
1970 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
1972 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n",
1973 hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
1975 nfp_net_cfg_queue_setup(hw);
1977 /* Get some of the read-only fields from the config BAR */
1978 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
1979 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
1980 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
1981 hw->mtu = hw->max_mtu;
1983 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
1984 hw->rx_offset = NFP_NET_RX_OFFSET;
1986 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
1988 PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n",
1989 hw->ver, hw->max_mtu);
1990 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap,
1991 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
1992 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
1993 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
1994 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
1995 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
1996 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
1997 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
1998 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
1999 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");
2001 pci_dev = eth_dev->pci_dev;
2004 hw->stride_rx = stride;
2005 hw->stride_tx = stride;
2007 PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
2008 hw->max_rx_queues, hw->max_tx_queues);
2010 /* Allocating memory for mac addr */
2011 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2012 if (eth_dev->data->mac_addrs == NULL) {
2013 PMD_INIT_LOG(ERR, "Failed to space for MAC address");
2017 /* Using random mac addresses for VFs */
2018 eth_random_addr(&hw->mac_addr[0]);
2020 /* Copying mac address to DPDK eth_dev struct */
2021 ether_addr_copy((struct ether_addr *)hw->mac_addr,
2022 &eth_dev->data->mac_addrs[0]);
2024 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
2025 "mac=%02x:%02x:%02x:%02x:%02x:%02x",
2026 eth_dev->data->port_id, pci_dev->id.vendor_id,
2027 pci_dev->id.device_id,
2028 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
2029 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
2031 /* Recording current stats counter values */
2032 nfp_net_stats_reset(eth_dev);
2037 static struct rte_pci_id pci_id_nfp_net_map[] = {
2039 .vendor_id = PCI_VENDOR_ID_NETRONOME,
2040 .device_id = PCI_DEVICE_ID_NFP6000_PF_NIC,
2041 .subsystem_vendor_id = PCI_ANY_ID,
2042 .subsystem_device_id = PCI_ANY_ID,
2045 .vendor_id = PCI_VENDOR_ID_NETRONOME,
2046 .device_id = PCI_DEVICE_ID_NFP6000_VF_NIC,
2047 .subsystem_vendor_id = PCI_ANY_ID,
2048 .subsystem_device_id = PCI_ANY_ID,
2055 static struct eth_driver rte_nfp_net_pmd = {
2057 .name = "rte_nfp_net_pmd",
2058 .id_table = pci_id_nfp_net_map,
2059 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2061 .eth_dev_init = nfp_net_init,
2062 .dev_private_size = sizeof(struct nfp_net_adapter),
2066 nfp_net_pmd_init(const char *name __rte_unused,
2067 const char *params __rte_unused)
2069 PMD_INIT_FUNC_TRACE();
2070 PMD_INIT_LOG(INFO, "librte_pmd_nfp_net version %s\n",
2071 NFP_NET_PMD_VERSION);
2073 rte_eth_driver_register(&rte_nfp_net_pmd);
2077 static struct rte_driver rte_nfp_net_driver = {
2079 .init = nfp_net_pmd_init,
2082 PMD_REGISTER_DRIVER(rte_nfp_net_driver);
2086 * c-file-style: "Linux"
2087 * indent-tabs-mode: t