/*
 * Copyright (c) 2014, 2015 Netronome Systems, Inc.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_net.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */

#include <sys/socket.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_version.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
#include "nfp_net_ctrl.h"

static void nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_configure(struct rte_eth_dev *dev);
static int nfp_net_init(struct rte_eth_dev *eth_dev);
static int nfp_net_start(struct rte_eth_dev *dev);
static void nfp_net_stop(struct rte_eth_dev *dev);

/*
 * The offset of the queue controller queues in the PCIe Target. These
 * happen to be at the same offset on the NFP6000 and the NFP3200 so
 * we use a single macro here.
 */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (0x800 * ((_q) & 0xff)))
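
/*
 * Worked example (illustrative only): each queue occupies 0x800 bytes of
 * queue controller space starting at 0x80000, and the 8-bit mask wraps
 * indexes above 255 back into that window:
 *
 *	NFP_PCIE_QUEUE(1)   == 0x80000 + 0x800 * 1 == 0x80800
 *	NFP_PCIE_QUEUE(256) == NFP_PCIE_QUEUE(0)   == 0x80000
 */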

/* Maximum value which can be added to a queue with one transaction */
#define NFP_QCP_MAX_ADD	0x7f

#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
	(uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
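
/*
 * A minimal sketch of what the macro above computes, assuming the DPDK
 * default RTE_PKTMBUF_HEADROOM of 128 bytes: for an mbuf whose buffer
 * starts at physical address 0x1000, the DMA address handed to the
 * hardware is 0x1000 + 128 = 0x1080, the first byte of packet data
 * after the headroom.
 */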

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};

/*
 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
 * @q: Base address for queue structure
 * @ptr: Add to the Read or Write pointer
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void
nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
{
	uint32_t off;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_ADD_RPTR;
	else
		off = NFP_QCP_QUEUE_ADD_WPTR;

	while (val > NFP_QCP_MAX_ADD) {
		nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
		val -= NFP_QCP_MAX_ADD;
	}

	nn_writel(rte_cpu_to_le_32(val), q + off);
}
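
/*
 * Worked example (illustrative): advancing a write pointer by 200
 * exceeds NFP_QCP_MAX_ADD (0x7f == 127), so the loop above issues one
 * write of 127 followed by a final write of 200 - 127 = 73:
 *
 *	nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, 200);
 */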

/*
 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
 * @q: Base address for queue structure
 * @ptr: Read or Write pointer
 */
static inline uint32_t
nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
{
	uint32_t off;
	uint32_t val;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_STS_LO;
	else
		off = NFP_QCP_QUEUE_STS_HI;

	/* Device registers are little-endian; convert to host order */
	val = rte_le_to_cpu_32(nn_readl(q + off));

	if (ptr == NFP_QCP_READ_PTR)
		return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
	else
		return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}

/*
 * Functions to read/write from/to Config BAR
 * Performs any endian conversion necessary.
 */
static inline uint8_t
nn_cfg_readb(struct nfp_net_hw *hw, int off)
{
	return nn_readb(hw->ctrl_bar + off);
}

static inline void
nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
{
	nn_writeb(val, hw->ctrl_bar + off);
}

static inline uint32_t
nn_cfg_readl(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
{
	nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
}

static inline uint64_t
nn_cfg_readq(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
{
	nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
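
/*
 * Usage sketch (illustrative): callers read config words without caring
 * about byte order, e.g. fetching the capability bits that the
 * configure/start paths below test:
 *
 *	uint32_t cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 */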

static int
__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
{
	int cnt;
	uint32_t new;
	struct timespec wait;

	PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
		    hw->qcp_cfg);

	if (hw->qcp_cfg == NULL)
		rte_panic("Bad configuration queue pointer\n");

	nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);

	wait.tv_sec = 0;
	wait.tv_nsec = 1000000;

	PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");

	/* Poll update field, waiting for NFP to ack the config */
	for (cnt = 0; ; cnt++) {
		new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
		if (new == 0)
			break;
		if (new & NFP_NET_CFG_UPDATE_ERR) {
			PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new);
			return -1;
		}
		if (cnt >= NFP_NET_POLL_TIMEOUT) {
			PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
					  " %dms\n", update, cnt);
			rte_panic("Exiting\n");
		}
		nanosleep(&wait, 0); /* wait for 1 ms */
	}
	PMD_DRV_LOG(DEBUG, "Ack DONE\n");
	return 0;
}

/*
 * nfp_net_reconfig - Reconfigure the NIC
 * @nn: device to reconfigure
 * @ctrl: The value for the ctrl field in the BAR config
 * @update: The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue. Then poll
 * until the firmware has acknowledged the update by zeroing the update word.
 */
static int
nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
{
	int err;

	PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
		    ctrl, update);

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);

	/* Make sure the update word lands before the queue is kicked */
	rte_wmb();

	err = __nfp_net_reconfig(hw, update);

	if (!err)
		return 0;

	/*
	 * Reconfig errors reported here are ones the caller may be able
	 * to handle; anything fatal has already hit rte_panic inside
	 * __nfp_net_reconfig.
	 */
	PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n",
		     ctrl, update);

	return -EIO;
}
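
/*
 * Example (illustrative): a feature toggle such as enabling promiscuous
 * mode, assuming the device advertises NFP_NET_CFG_CTRL_PROMISC, would
 * follow the same pattern the callers below use:
 *
 *	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 *	if (nfp_net_reconfig(hw, new_ctrl, NFP_NET_CFG_UPDATE_GEN) == 0)
 *		hw->ctrl = new_ctrl;
 */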

/*
 * Configure an Ethernet device. This function must be invoked first
 * before any other function in the Ethernet API. This function can
 * also be re-invoked when a device is in the stopped state.
 */
static int
nfp_net_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	uint32_t new_ctrl = 0;
	uint32_t update = 0;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * A DPDK app sends info about how many queues to use and how
	 * those queues need to be configured. This is used by the
	 * DPDK core, which makes sure no more queues than those
	 * advertised by the driver are requested. This function is
	 * called after that internal process.
	 */

	PMD_INIT_LOG(DEBUG, "Configure\n");

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;
	txmode = &dev_conf->txmode;

	/* Checking TX mode */
	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n");
		return -EINVAL;
	}

	/* Checking RX mode */
	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
		if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
			update = NFP_NET_CFG_UPDATE_RSS;
			new_ctrl = NFP_NET_CFG_CTRL_RSS;
		} else {
			PMD_INIT_LOG(INFO, "RSS not supported\n");
			return -EINVAL;
		}
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "rxmode does not support split header\n");
		return -EINVAL;
	}

	if (rxmode->hw_ip_checksum) {
		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
			new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
		} else {
			PMD_INIT_LOG(INFO, "RXCSUM not supported\n");
			return -EINVAL;
		}
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported\n");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_strip) {
		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
		} else {
			PMD_INIT_LOG(INFO, "hw vlan strip not supported\n");
			return -EINVAL;
		}
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported\n");
		return -EINVAL;
	}

	/* Supporting VLAN insertion by default */
	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
		new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;

	/* Jumbo frames are already handled in rte_eth_dev_configure */

	if (rxmode->hw_strip_crc) {
		PMD_INIT_LOG(INFO, "strip CRC not supported\n");
		return -EINVAL;
	}

	if (rxmode->enable_scatter) {
		PMD_INIT_LOG(INFO, "Scatter not supported\n");
		return -EINVAL;
	}

	/* If nothing was asked for, the reconfig can be skipped */
	if (!new_ctrl)
		return 0;

	update |= NFP_NET_CFG_UPDATE_GEN;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	return 0;
}
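
/*
 * A minimal sketch of the application-side call that ends up here,
 * assuming one RX/TX queue pair with RSS requested (illustrative only;
 * port_id and queue counts are hypothetical):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *	};
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 */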

static void
nfp_net_enable_queues(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	uint64_t enabled_queues = 0;
	int i;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Enabling the required TX queues in the device */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		enabled_queues |= (1ULL << i);

	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);

	enabled_queues = 0;

	/* Enabling the required RX queues in the device */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		enabled_queues |= (1ULL << i);

	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
}
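
/*
 * Worked example (illustrative): with nb_tx_queues == 4 the loop above
 * builds the mask 0b1111 == 0xf, enabling TX rings 0-3. The mask is a
 * uint64_t so devices with more than 32 rings can be expressed, hence
 * the 1ULL shifts.
 */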

static void
nfp_net_disable_queues(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	uint32_t new_ctrl, update = 0;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);

	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
		 NFP_NET_CFG_UPDATE_MSIX;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	/* If the reconfig fails, leave the cached hw state unchanged */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}

static void
nfp_net_params_setup(struct nfp_net_hw *hw)
{
	uint32_t *mac_address;

	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);

	/*
	 * The MAC address field in the config BAR is 8 bytes long: the
	 * 6-byte MAC is written as two 32-bit big-endian words.
	 */
	mac_address = (uint32_t *)(hw->mac_addr);

	nn_cfg_writel(hw, NFP_NET_CFG_MACADDR,
		      rte_cpu_to_be_32(*mac_address));
	nn_cfg_writel(hw, NFP_NET_CFG_MACADDR + 4,
		      rte_cpu_to_be_32(*(mac_address + 1)));
}
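
/*
 * The reconfig mailbox lives in the queue controller area mapped through
 * the TX BAR, one queue-sized slot (NFP_QCP_QUEUE_ADDR_SZ) past its base;
 * the helper below simply caches that address (a reading of the offset
 * used here, not a statement from the datasheet).
 */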
static void
nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
{
	hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
}

static int
nfp_net_start(struct rte_eth_dev *dev)
{
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start\n");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/*
	 * Enable device. NFP_NET_CFG_UPDATE_MSIX is an update-word flag,
	 * not a ctrl bit, so it is grouped with the other update flags.
	 */
	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
		 NFP_NET_CFG_UPDATE_MSIX;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
nfp_net_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_LOG(DEBUG, "Stop\n");

	nfp_net_disable_queues(dev);
}

/* Reset and stop device. The device can not be restarted. */
static void
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_INIT_LOG(DEBUG, "Close\n");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);

	/*
	 * The ixgbe PMD disables the PCIe bus master on the device;
	 * the i40e PMD does not...
	 */
}

/* Initialise and register driver with DPDK Application */
static struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure = nfp_net_configure,
	.dev_start = nfp_net_start,
	.dev_stop = nfp_net_stop,
	.dev_close = nfp_net_close,
};

static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;

	uint32_t tx_bar_off, rx_bar_off;
	uint32_t start_q;
	int stride = 4;

	PMD_INIT_FUNC_TRACE();

	hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = eth_dev->pci_dev;
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		RTE_LOG(ERR, PMD,
			"hw->ctrl_bar is NULL. BAR0 not configured\n");
		return -ENODEV;
	}

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP6000_VF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = NFP_PCIE_QUEUE(start_q);
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = NFP_PCIE_QUEUE(start_q);
		break;
	default:
		RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off);

	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = hw->max_mtu;

	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n",
		     hw->ver, hw->max_mtu);
	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap,
		     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
		     hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
		     hw->max_rx_queues, hw->max_tx_queues);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		return -ENOMEM;
	}

	/* Using random mac addresses for VFs */
	eth_random_addr(&hw->mac_addr[0]);

	/* Copying mac address to DPDK eth_dev struct */
	ether_addr_copy((struct ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
		     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	return 0;
}

static struct rte_pci_id pci_id_nfp_net_map[] = {
	{
		.vendor_id = PCI_VENDOR_ID_NETRONOME,
		.device_id = PCI_DEVICE_ID_NFP6000_PF_NIC,
		.subsystem_vendor_id = PCI_ANY_ID,
		.subsystem_device_id = PCI_ANY_ID,
	},
	{
		.vendor_id = PCI_VENDOR_ID_NETRONOME,
		.device_id = PCI_DEVICE_ID_NFP6000_VF_NIC,
		.subsystem_vendor_id = PCI_ANY_ID,
		.subsystem_device_id = PCI_ANY_ID,
	},
	{
		.vendor_id = 0, /* sentinel */
	},
};

static struct eth_driver rte_nfp_net_pmd = {
	.pci_drv = {
		.name = "rte_nfp_net_pmd",
		.id_table = pci_id_nfp_net_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = nfp_net_init,
	.dev_private_size = sizeof(struct nfp_net_adapter),
};

static int
nfp_net_pmd_init(const char *name __rte_unused,
		 const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	PMD_INIT_LOG(INFO, "librte_pmd_nfp_net version %s\n",
		     NFP_NET_PMD_VERSION);

	rte_eth_driver_register(&rte_nfp_net_pmd);
	return 0;
}

static struct rte_driver rte_nfp_net_driver = {
	.type = PMD_PDEV,
	.init = nfp_net_pmd_init,
};

PMD_REGISTER_DRIVER(rte_nfp_net_driver);

/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */