/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2018 Netronome Systems, Inc.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 *
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_net.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_version.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_spinlock.h>
#include <rte_service_component.h>

#include "eal_firmware.h"

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"

#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
#include "nfp_net_ctrl.h"

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
static int nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_configure(struct rte_eth_dev *dev);
static void nfp_net_dev_interrupt_handler(void *param);
static void nfp_net_dev_interrupt_delayed_handler(void *param);
static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int nfp_net_infos_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static int nfp_net_init(struct rte_eth_dev *eth_dev);
static int nfp_pf_init(struct rte_pci_device *pci_dev);
static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev);
static int nfp_pci_uninit(struct rte_eth_dev *eth_dev);
static int nfp_init_phyports(struct nfp_pf_dev *pf_dev);
static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int nfp_net_promisc_enable(struct rte_eth_dev *dev);
static int nfp_net_promisc_disable(struct rte_eth_dev *dev);
static int nfp_net_start(struct rte_eth_dev *dev);
static int nfp_net_stats_get(struct rte_eth_dev *dev,
                             struct rte_eth_stats *stats);
static int nfp_net_stats_reset(struct rte_eth_dev *dev);
static int nfp_net_stop(struct rte_eth_dev *dev);
static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_conf *rss_conf);
static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
                                  struct rte_eth_rss_reta_entry64 *reta_conf,
                                  uint16_t reta_size);
static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
                                  struct rte_eth_rss_conf *rss_conf);
static int nfp_set_mac_addr(struct rte_eth_dev *dev,
                            struct rte_ether_addr *mac_addr);
static int32_t nfp_cpp_bridge_service_func(void *args);
static void nfp_register_cpp_service(struct nfp_cpp *cpp);
static int nfp_fw_setup(struct rte_pci_device *dev,
                        struct nfp_cpp *cpp,
                        struct nfp_eth_table *nfp_eth_table,
                        struct nfp_hwinfo *hwinfo);
static int
__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
{
        int cnt;
        uint32_t new;
        struct timespec wait;

        PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
                    hw->qcp_cfg);

        if (hw->qcp_cfg == NULL)
                rte_panic("Bad configuration queue pointer\n");

        nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);

        wait.tv_sec = 0;
        wait.tv_nsec = 1000000;

        PMD_DRV_LOG(DEBUG, "Polling for update ack...");

        /* Poll update field, waiting for NFP to ack the config */
        for (cnt = 0; ; cnt++) {
                new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
                if (new == 0)
                        break;
                if (new & NFP_NET_CFG_UPDATE_ERR) {
                        PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
                        return -1;
                }
                if (cnt >= NFP_NET_POLL_TIMEOUT) {
                        PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
                                          " %dms", update, cnt);
                        rte_panic("Exiting\n");
                }
                nanosleep(&wait, 0); /* wait for 1ms */
        }
        PMD_DRV_LOG(DEBUG, "Ack DONE");
        return 0;
}

/*
 * Reconfigure the NIC
 * @nn:     device to reconfigure
 * @ctrl:   The value for the ctrl field in the BAR config
 * @update: The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue. Then poll
 * until the firmware has acknowledged the update by zeroing the update word.
 */
static int
nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
{
        int err;

        PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
                    ctrl, update);

        rte_spinlock_lock(&hw->reconfig_lock);

        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
        nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);

        rte_wmb();

        err = __nfp_net_reconfig(hw, update);

        rte_spinlock_unlock(&hw->reconfig_lock);

        if (!err)
                return 0;

        /*
         * Reconfig errors returned here imply situations the caller can
         * handle; otherwise rte_panic is called inside __nfp_net_reconfig.
         */
        PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
                     ctrl, update);
        return -EIO;
}
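/*
 * Illustrative sketch (editorial, not part of the driver): a typical caller
 * pairs a ctrl bit with the matching update flags and commits both through
 * nfp_net_reconfig(), e.g. to enable the device:
 *
 *	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
 *	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 *	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
 *		return -EIO;
 *	hw->ctrl = new_ctrl;
 *
 * hw->ctrl is only updated after the firmware acks, so a failed reconfig
 * leaves the cached control word in sync with the device.
 */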
/*
 * Configure an Ethernet device. This function must be invoked first
 * before any other function in the Ethernet API. This function can
 * also be re-invoked when a device is in the stopped state.
 */
static int
nfp_net_configure(struct rte_eth_dev *dev)
{
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        struct rte_eth_txmode *txmode;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /*
         * A DPDK app sends info about how many queues to use and how
         * those queues need to be configured. This is used by the
         * DPDK core and it makes sure no more queues than those
         * advertised by the driver are requested. This function is
         * called after that internal process.
         */

        PMD_INIT_LOG(DEBUG, "Configure");

        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;
        txmode = &dev_conf->txmode;

        if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
                rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        /* Checking TX mode */
        if (txmode->mq_mode) {
                PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
                return -EINVAL;
        }

        /* Checking RX mode */
        if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
            !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
                PMD_INIT_LOG(INFO, "RSS not supported");
                return -EINVAL;
        }

        return 0;
}
static void
nfp_net_enable_queues(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        uint64_t enabled_queues = 0;
        int i;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Enabling the required TX queues in the device */
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                enabled_queues |= (1 << i);

        nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);

        enabled_queues = 0;

        /* Enabling the required RX queues in the device */
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                enabled_queues |= (1 << i);

        nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
}
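/*
 * Worked example (illustrative): with nb_tx_queues == 4 the loop above
 * builds the ring mask 0b1111 (0xf), i.e. bit i enables TX ring i; the same
 * scheme produces the RX ring mask written to NFP_NET_CFG_RXRS_ENABLE.
 */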
static void
nfp_net_disable_queues(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        uint32_t new_ctrl, update = 0;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
        nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);

        new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
        update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
                 NFP_NET_CFG_UPDATE_MSIX;

        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
                new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

        /* If reconfig fails, leave the hw state unchanged */
        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
                return;

        hw->ctrl = new_ctrl;
}
static void
nfp_net_params_setup(struct nfp_net_hw *hw)
{
        nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
        nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
}

static void
nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
{
        hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
}

#define ETH_ADDR_LEN    6

static void
nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
{
        int i;

        for (i = 0; i < ETH_ADDR_LEN; i++)
                dst[i] = src[i];
}
static void
nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port)
{
        struct nfp_eth_table *nfp_eth_table;
        struct nfp_net_hw *hw = NULL;

        /* Grab a pointer to the correct physical port */
        hw = pf_dev->ports[port];

        nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);

        nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
                         (uint8_t *)&nfp_eth_table->ports[port].mac_addr);

        free(nfp_eth_table);
}

static void
nfp_net_vf_read_mac(struct nfp_net_hw *hw)
{
        uint32_t tmp;

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
        memcpy(&hw->mac_addr[0], &tmp, 4);

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
        memcpy(&hw->mac_addr[4], &tmp, 2);
}
static void
nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
{
        uint32_t mac0 = *(uint32_t *)mac;
        uint16_t mac1;

        nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);

        mac += 4;
        mac1 = *(uint16_t *)mac;
        nn_writew(rte_cpu_to_be_16(mac1),
                  hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
}
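/*
 * Note (editorial, inferred from the accessors above): the 6-byte MAC lives
 * in an 8-byte config window. The helpers move it as one 32-bit word
 * (bytes 0-3 at NFP_NET_CFG_MACADDR) plus one 16-bit word (bytes 4-5 at
 * NFP_NET_CFG_MACADDR + 6), byte-swapping between host order and the
 * device's big-endian layout on each access.
 */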
static int
nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
        struct nfp_net_hw *hw;
        uint32_t update, ctrl;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
            !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
                PMD_INIT_LOG(INFO, "MAC address cannot be changed while"
                                   " the port is enabled");
                return -EBUSY;
        }

        /* Writing new MAC to the specific port BAR address */
        nfp_net_write_mac(hw, (uint8_t *)mac_addr);

        /* Signal the NIC about the change */
        update = NFP_NET_CFG_UPDATE_MACADDR;
        ctrl = hw->ctrl;
        if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
            (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
                ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
        if (nfp_net_reconfig(hw, ctrl, update) < 0) {
                PMD_INIT_LOG(INFO, "MAC address update failed");
                return -EIO;
        }

        return 0;
}
static int
nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                           struct rte_intr_handle *intr_handle)
{
        struct nfp_net_hw *hw;
        int i;

        if (!intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                                     " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
                /* UIO just supports one queue and no LSC */
                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
                intr_handle->intr_vec[0] = 0;
        } else {
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        /*
                         * The first MSI-X vector is reserved for non-data
                         * interrupts, so queue vectors start at 1.
                         */
                        nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
                        intr_handle->intr_vec[i] = i + 1;
                        PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
                                     intr_handle->intr_vec[i]);
                }
        }

        /* Avoiding TX interrupts */
        hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
        return 0;
}
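/*
 * Worked example (illustrative): with VFIO and 3 RX queues the mapping
 * written above is queue 0 -> vector 1, queue 1 -> vector 2,
 * queue 2 -> vector 3; vector 0 stays reserved for the LSC interrupt.
 */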
static uint32_t
nfp_check_offloads(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        struct rte_eth_txmode *txmode;
        uint32_t ctrl = 0;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;
        txmode = &dev_conf->txmode;

        if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
                if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
                        ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
        }

        if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
                if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
                        ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
        }

        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
                hw->mtu = rxmode->max_rx_pkt_len;

        if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
                ctrl |= NFP_NET_CFG_CTRL_TXVLAN;

        /* L2 broadcast */
        if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
                ctrl |= NFP_NET_CFG_CTRL_L2BC;

        /* L2 multicast */
        if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
                ctrl |= NFP_NET_CFG_CTRL_L2MC;

        /* TX checksum offload */
        if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
            txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
            txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
                ctrl |= NFP_NET_CFG_CTRL_TXCSUM;

        /* LSO offload */
        if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
                if (hw->cap & NFP_NET_CFG_CTRL_LSO)
                        ctrl |= NFP_NET_CFG_CTRL_LSO;
                else
                        ctrl |= NFP_NET_CFG_CTRL_LSO2;
        }

        /* RX gather */
        if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
                ctrl |= NFP_NET_CFG_CTRL_GATHER;

        return ctrl;
}
static int
nfp_net_start(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
        struct nfp_pf_dev *pf_dev;
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        uint32_t intr_vector;
        int ret;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        PMD_INIT_LOG(DEBUG, "Start");

        /* Disabling queues just in case... */
        nfp_net_disable_queues(dev);

        /* Enabling the required queues in the device */
        nfp_net_enable_queues(dev);

        /* check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                if (pf_dev->multiport) {
                        PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
                                     "with NFP multiport PF");
                        return -EINVAL;
                }
                if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
                        /*
                         * Better not to share LSC with RX interrupts.
                         * Unregistering LSC interrupt handler.
                         */
                        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                nfp_net_dev_interrupt_handler, (void *)dev);

                        if (dev->data->nb_rx_queues > 1) {
                                PMD_INIT_LOG(ERR, "PMD rx interrupt only "
                                             "supports 1 queue with UIO");
                                return -EIO;
                        }
                }
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;

                nfp_configure_rx_interrupt(dev, intr_handle);
                update = NFP_NET_CFG_UPDATE_MSIX;
        }

        rte_intr_enable(intr_handle);

        new_ctrl = nfp_check_offloads(dev);

        /* Writing configuration parameters in the device */
        nfp_net_params_setup(hw);

        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;

        if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
                nfp_net_rss_config_default(dev);
                update |= NFP_NET_CFG_UPDATE_RSS;
                new_ctrl |= NFP_NET_CFG_CTRL_RSS;
        }

        /* Enable device */
        new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
                return -EIO;

        /*
         * Allocating rte mbufs for configured rx queues.
         * This requires queues being enabled before.
         */
        if (nfp_net_rx_freelist_setup(dev) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        if (hw->is_phyport) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                        /* Configure the physical port up */
                        nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
                else
                        nfp_eth_set_configured(dev->process_private,
                                               hw->nfp_idx, 1);
        }

        hw->ctrl = new_ctrl;

        return 0;

error:
        /*
         * An error returned by this function should mean the app exits and
         * the system then releases all the memory allocated, even memory
         * coming from hugepages.
         *
         * The device could be enabled at this point with some queues
         * ready for getting packets. This is true if the call to
         * nfp_net_rx_freelist_setup() succeeds for some queues but
         * fails for subsequent queues.
         *
         * This should make the app exit, but it is better if we tell the
         * app what is happening...
         */
        nfp_net_disable_queues(dev);

        return ret;
}
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
nfp_net_stop(struct rte_eth_dev *dev)
{
        int i;
        struct nfp_net_hw *hw;

        PMD_INIT_LOG(DEBUG, "Stop");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                nfp_net_reset_tx_queue(
                        (struct nfp_net_txq *)dev->data->tx_queues[i]);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                nfp_net_reset_rx_queue(
                        (struct nfp_net_rxq *)dev->data->rx_queues[i]);
        }

        if (hw->is_phyport) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                        /* Configure the physical port down */
                        nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
                else
                        nfp_eth_set_configured(dev->process_private,
                                               hw->nfp_idx, 0);
        }

        return 0;
}
/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;

        PMD_DRV_LOG(DEBUG, "Set link up");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                /* Configure the physical port up */
                return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
        else
                return nfp_eth_set_configured(dev->process_private,
                                              hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;

        PMD_DRV_LOG(DEBUG, "Set link down");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                /* Configure the physical port down */
                return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
        else
                return nfp_eth_set_configured(dev->process_private,
                                              hw->nfp_idx, 0);
}
/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        struct rte_pci_device *pci_dev;
        int i;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        PMD_INIT_LOG(DEBUG, "Close");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        /*
         * We assume that the DPDK application is stopping all the
         * threads/queues before calling the device close function.
         */

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                nfp_net_reset_tx_queue(
                        (struct nfp_net_txq *)dev->data->tx_queues[i]);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                nfp_net_reset_rx_queue(
                        (struct nfp_net_rxq *)dev->data->rx_queues[i]);
        }

        /* Only free PF resources after all physical ports have been closed */
        if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC ||
            pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) {
                struct nfp_pf_dev *pf_dev;
                pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);

                /* Mark this port as unused and free device priv resources */
                nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
                pf_dev->ports[hw->idx] = NULL;
                rte_eth_dev_release_port(dev);

                for (i = 0; i < pf_dev->total_phyports; i++) {
                        /* Check to see if ports are still in use */
                        if (pf_dev->ports[i])
                                return 0;
                }

                /* Now it is safe to free all PF resources */
                PMD_INIT_LOG(INFO, "Freeing PF resources");
                nfp_cpp_area_free(pf_dev->ctrl_area);
                nfp_cpp_area_free(pf_dev->hwqueues_area);
                free(pf_dev->hwinfo);
                free(pf_dev->sym_tbl);
                nfp_cpp_free(pf_dev->cpp);
                rte_free(pf_dev);
        }

        rte_intr_disable(&pci_dev->intr_handle);

        /* unregister callback func from eal lib */
        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                     nfp_net_dev_interrupt_handler,
                                     (void *)dev);

        /*
         * The ixgbe PMD driver disables the pcie master on the
         * device. The i40e does not...
         */

        return 0;
}
static int
nfp_net_promisc_enable(struct rte_eth_dev *dev)
{
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
        int ret;

        PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
                PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
                return -ENOTSUP;
        }

        if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
                PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
                return 0;
        }

        new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
        update = NFP_NET_CFG_UPDATE_GEN;

        /*
         * DPDK sets promiscuous mode on just after this call assuming
         * it cannot fail ...
         */
        ret = nfp_net_reconfig(hw, new_ctrl, update);
        if (ret < 0)
                return ret;

        hw->ctrl = new_ctrl;

        return 0;
}

static int
nfp_net_promisc_disable(struct rte_eth_dev *dev)
{
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
        int ret;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
                PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
                return 0;
        }

        new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
        update = NFP_NET_CFG_UPDATE_GEN;

        /*
         * DPDK sets promiscuous mode off just before this call
         * assuming it cannot fail ...
         */
        ret = nfp_net_reconfig(hw, new_ctrl, update);
        if (ret < 0)
                return ret;

        hw->ctrl = new_ctrl;

        return 0;
}
/*
 * return 0 means link status changed, -1 means not changed
 *
 * Wait to complete is needed as it can take up to 9 seconds to get the Link
 * status.
 */
static int
nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
        struct nfp_net_hw *hw;
        struct rte_eth_link link;
        uint32_t nn_link_status;
        int ret;

        static const uint32_t ls_to_ethtool[] = {
                [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
                [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
                [NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
                [NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
                [NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
                [NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
                [NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
                [NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
        };

        PMD_DRV_LOG(DEBUG, "Link update");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);

        memset(&link, 0, sizeof(struct rte_eth_link));

        if (nn_link_status & NFP_NET_CFG_STS_LINK)
                link.link_status = ETH_LINK_UP;

        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
                         NFP_NET_CFG_STS_LINK_RATE_MASK;

        if (nn_link_status >= RTE_DIM(ls_to_ethtool))
                link.link_speed = ETH_SPEED_NUM_NONE;
        else
                link.link_speed = ls_to_ethtool[nn_link_status];

        ret = rte_eth_linkstatus_set(dev, &link);

        if (link.link_status)
                PMD_DRV_LOG(INFO, "NIC Link is Up");
        else
                PMD_DRV_LOG(INFO, "NIC Link is Down");

        return ret;
}
static int
nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        int i;
        struct nfp_net_hw *hw;
        struct rte_eth_stats nfp_dev_stats;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */

        memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));

        /* reading per RX ring stats */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
                        break;

                nfp_dev_stats.q_ipackets[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
                nfp_dev_stats.q_ipackets[i] -=
                        hw->eth_stats_base.q_ipackets[i];

                nfp_dev_stats.q_ibytes[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
                nfp_dev_stats.q_ibytes[i] -=
                        hw->eth_stats_base.q_ibytes[i];
        }

        /* reading per TX ring stats */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
                        break;

                nfp_dev_stats.q_opackets[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
                nfp_dev_stats.q_opackets[i] -=
                        hw->eth_stats_base.q_opackets[i];

                nfp_dev_stats.q_obytes[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
                nfp_dev_stats.q_obytes[i] -=
                        hw->eth_stats_base.q_obytes[i];
        }

        nfp_dev_stats.ipackets = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
        nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;

        nfp_dev_stats.ibytes = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
        nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;

        nfp_dev_stats.opackets = nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
        nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;

        nfp_dev_stats.obytes = nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
        nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;

        /* reading general device stats */
        nfp_dev_stats.ierrors = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
        nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;

        nfp_dev_stats.oerrors = nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
        nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;

        /* RX ring mbuf allocation failures */
        nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;

        nfp_dev_stats.imissed =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
        nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;

        if (stats) {
                memcpy(stats, &nfp_dev_stats, sizeof(*stats));
                return 0;
        }
        return -EINVAL;
}
static int
nfp_net_stats_reset(struct rte_eth_dev *dev)
{
        int i;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /*
         * hw->eth_stats_base records the per counter starting point.
         * Let's update it now.
         */

        /* reading per RX ring stats */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
                        break;

                hw->eth_stats_base.q_ipackets[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));

                hw->eth_stats_base.q_ibytes[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
        }

        /* reading per TX ring stats */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
                        break;

                hw->eth_stats_base.q_opackets[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));

                hw->eth_stats_base.q_obytes[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
        }

        hw->eth_stats_base.ipackets =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);

        hw->eth_stats_base.ibytes =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);

        hw->eth_stats_base.opackets =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);

        hw->eth_stats_base.obytes =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);

        /* reading general device stats */
        hw->eth_stats_base.ierrors =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);

        hw->eth_stats_base.oerrors =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);

        /* RX ring mbuf allocation failures */
        dev->data->rx_mbuf_alloc_failed = 0;

        hw->eth_stats_base.imissed =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);

        return 0;
}
static int
nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
        dev_info->max_rx_pktlen = hw->max_mtu;
        /* Next should change when PF support is implemented */
        dev_info->max_mac_addrs = 1;

        if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
                dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

        if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
                                             DEV_RX_OFFLOAD_UDP_CKSUM |
                                             DEV_RX_OFFLOAD_TCP_CKSUM;

        if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
                dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;

        if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
                                             DEV_TX_OFFLOAD_UDP_CKSUM |
                                             DEV_TX_OFFLOAD_TCP_CKSUM;

        if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;

        if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = DEFAULT_RX_PTHRESH,
                        .hthresh = DEFAULT_RX_HTHRESH,
                        .wthresh = DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = DEFAULT_TX_PTHRESH,
                        .hthresh = DEFAULT_TX_HTHRESH,
                        .wthresh = DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = NFP_NET_MAX_RX_DESC,
                .nb_min = NFP_NET_MIN_RX_DESC,
                .nb_align = NFP_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = NFP_NET_MAX_TX_DESC,
                .nb_min = NFP_NET_MIN_TX_DESC,
                .nb_align = NFP_ALIGN_RING_DESC,
                .nb_seg_max = NFP_TX_MAX_SEG,
                .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
        };

        /* All NFP devices support jumbo frames */
        dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;

        if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;

                dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
                                                   ETH_RSS_NONFRAG_IPV4_TCP |
                                                   ETH_RSS_NONFRAG_IPV4_UDP |
                                                   ETH_RSS_IPV6 |
                                                   ETH_RSS_NONFRAG_IPV6_TCP |
                                                   ETH_RSS_NONFRAG_IPV6_UDP;

                dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
                dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
        }

        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
                               ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
                               ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;

        return 0;
}
static const uint32_t *
nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                /* refers to nfp_net_set_hash() */
                RTE_PTYPE_INNER_L3_IPV4,
                RTE_PTYPE_INNER_L3_IPV6,
                RTE_PTYPE_INNER_L3_IPV6_EXT,
                RTE_PTYPE_INNER_L4_MASK,
                RTE_PTYPE_UNKNOWN
        };

        if (dev->rx_pkt_burst == nfp_net_recv_pkts)
                return ptypes;
        return NULL;
}
static int
nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_hw *hw;
        int base = 0;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;

        /* Make sure all updates are written before un-masking */
        rte_wmb();
        nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
                      NFP_NET_CFG_ICR_UNMASKED);
        return 0;
}

static int
nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_hw *hw;
        int base = 0;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;

        /* Make sure all updates are written before masking */
        rte_wmb();
        nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
        return 0;
}
static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;

        rte_eth_linkstatus_get(dev, &link);
        if (link.link_status)
                PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                            dev->data->port_id, link.link_speed,
                            link.link_duplex == ETH_LINK_FULL_DUPLEX
                            ? "full-duplex" : "half-duplex");
        else
                PMD_DRV_LOG(INFO, " Port %d: Link Down",
                            dev->data->port_id);

        PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
                    pci_dev->addr.domain, pci_dev->addr.bus,
                    pci_dev->addr.devid, pci_dev->addr.function);
}
/* Interrupt configuration and handling */

/*
 * nfp_net_irq_unmask - Unmask an interrupt
 *
 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
 * clear the ICR for the entry.
 */
static void
nfp_net_irq_unmask(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        struct rte_pci_device *pci_dev;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
                /* If MSI-X auto-masking is used, clear the entry */
                rte_intr_ack(&pci_dev->intr_handle);
        } else {
                /* Make sure all updates are written before un-masking */
                rte_wmb();
                nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
                              NFP_NET_CFG_ICR_UNMASKED);
        }
}
static void
nfp_net_dev_interrupt_handler(void *param)
{
        int64_t timeout;
        struct rte_eth_link link;
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");

        rte_eth_linkstatus_get(dev, &link);

        nfp_net_link_update(dev, 0);

        if (!link.link_status) {
                /* likely going up: handle it 1 sec later, once stable */
                timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
        } else {
                /* likely going down: handle it 4 sec later, once stable */
                timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
        }

        if (rte_eal_alarm_set(timeout * 1000,
                              nfp_net_dev_interrupt_delayed_handler,
                              (void *)dev) < 0) {
                PMD_INIT_LOG(ERR, "Error setting alarm");
                /* Unmasking */
                nfp_net_irq_unmask(dev);
        }
}

/*
 * Interrupt handler registered as an alarm callback for delayed handling of
 * a specific interrupt, waiting for the NIC state to become stable. As the
 * NFP interrupt state is not stable right after the link goes down, it needs
 * to wait 4 seconds to get the stable status.
 *
 * @param handle   Pointer to interrupt handle.
 * @param param    The address of parameter (struct rte_eth_dev *)
 */
static void
nfp_net_dev_interrupt_delayed_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        nfp_net_link_update(dev, 0);
        rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        nfp_net_dev_link_status_print(dev);

        /* Unmasking */
        nfp_net_irq_unmask(dev);
}
static int
nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* check that mtu is within the allowed range */
        if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)
                return -EINVAL;

        /* mtu setting is forbidden if port is started */
        if (dev->data->dev_started) {
                PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
                            dev->data->port_id);
                return -EBUSY;
        }

        /* switch to jumbo mode if needed */
        if ((uint32_t)mtu > RTE_ETHER_MTU)
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        /* update max frame size */
        dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;

        /* writing to configuration space */
        nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);

        hw->mtu = mtu;

        return 0;
}
static int
nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        uint32_t new_ctrl, update;
        struct nfp_net_hw *hw;
        int ret;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        new_ctrl = 0;

        /* Enable vlan strip if it is not configured yet */
        if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
            !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
                new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;

        /* Disable vlan strip just if it is configured */
        if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
            (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
                new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;

        if (new_ctrl == 0)
                return 0;

        update = NFP_NET_CFG_UPDATE_GEN;

        ret = nfp_net_reconfig(hw, new_ctrl, update);
        if (!ret)
                hw->ctrl = new_ctrl;

        return ret;
}
static int
nfp_net_rss_reta_write(struct rte_eth_dev *dev,
                       struct rte_eth_rss_reta_entry64 *reta_conf,
                       uint16_t reta_size)
{
        uint32_t reta, mask;
        int i, j;
        int idx, shift;
        struct nfp_net_hw *hw =
                NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
                PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
                            "(%d) doesn't match what the hardware supports "
                            "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
                return -EINVAL;
        }

        /*
         * Update Redirection Table. There are 128 8-bit entries which can be
         * managed as 32 32-bit entries.
         */
        for (i = 0; i < reta_size; i += 4) {
                /* Handling 4 RSS entries per loop */
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);

                if (!mask)
                        continue;

                reta = 0;
                /* If all 4 entries were set, there is no need to read RETA */
                if (mask != 0xF)
                        reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);

                for (j = 0; j < 4; j++) {
                        if (!(mask & (0x1 << j)))
                                continue;
                        /* Clearing the entry bits */
                        reta &= ~(0xFF << (8 * j));
                        reta |= reta_conf[idx].reta[shift + j] << (8 * j);
                }
                nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
                              reta);
        }
        return 0;
}
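/*
 * Worked example (illustrative): RETA entry 6 lives in 32-bit word
 * NFP_NET_CFG_RSS_ITBL + 4 (i = 4 covers entries 4-7) at byte lane j = 2,
 * so its queue index occupies bits 23:16 of that word.
 */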
/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
static int
nfp_net_reta_update(struct rte_eth_dev *dev,
                    struct rte_eth_rss_reta_entry64 *reta_conf,
                    uint16_t reta_size)
{
        struct nfp_net_hw *hw =
                NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t update;
        int ret;

        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
                return -EINVAL;

        ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
        if (ret != 0)
                return ret;

        update = NFP_NET_CFG_UPDATE_RSS;

        if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
                return -EIO;

        return 0;
}

/* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
static int
nfp_net_reta_query(struct rte_eth_dev *dev,
                   struct rte_eth_rss_reta_entry64 *reta_conf,
                   uint16_t reta_size)
{
        uint32_t reta, mask;
        int i, j;
        int idx, shift;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
                return -EINVAL;

        if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
                PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
                            "(%d) doesn't match what the hardware supports "
                            "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
                return -EINVAL;
        }

        /*
         * Reading Redirection Table. There are 128 8-bit entries which can be
         * managed as 32 32-bit entries.
         */
        for (i = 0; i < reta_size; i += 4) {
                /* Handling 4 RSS entries per loop */
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);

                if (!mask)
                        continue;

                reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
                                    shift);
                for (j = 0; j < 4; j++) {
                        if (!(mask & (0x1 << j)))
                                continue;
                        /* Each RETA entry is 8 bits wide, hence the 0xFF */
                        reta_conf[idx].reta[shift + j] =
                                (uint8_t)((reta >> (8 * j)) & 0xFF);
                }
        }
        return 0;
}
static int
nfp_net_rss_hash_write(struct rte_eth_dev *dev,
                       struct rte_eth_rss_conf *rss_conf)
{
        struct nfp_net_hw *hw;
        uint64_t rss_hf;
        uint32_t cfg_rss_ctrl = 0;
        uint8_t key;
        int i;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Writing the key byte by byte */
        for (i = 0; i < rss_conf->rss_key_len; i++) {
                memcpy(&key, &rss_conf->rss_key[i], 1);
                nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
        }

        rss_hf = rss_conf->rss_hf;

        if (rss_hf & ETH_RSS_IPV4)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;

        if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;

        if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;

        if (rss_hf & ETH_RSS_IPV6)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;

        if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;

        if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;

        cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
        cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;

        /* configuring where to apply the RSS hash */
        nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);

        /* Writing the key size */
        nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);

        return 0;
}
static int
nfp_net_rss_hash_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_conf *rss_conf)
{
        uint32_t update;
        uint64_t rss_hf;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        rss_hf = rss_conf->rss_hf;

        /* Checking if RSS is enabled */
        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
                if (rss_hf != 0) { /* Enable RSS? */
                        PMD_DRV_LOG(ERR, "RSS unsupported");
                        return -EINVAL;
                }
                return 0; /* Nothing to do */
        }

        if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
                PMD_DRV_LOG(ERR, "hash key too long");
                return -EINVAL;
        }

        nfp_net_rss_hash_write(dev, rss_conf);

        update = NFP_NET_CFG_UPDATE_RSS;

        if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
                return -EIO;

        return 0;
}
static int
nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
        uint64_t rss_hf;
        uint32_t cfg_rss_ctrl;
        uint8_t key;
        int i;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
                return -EINVAL;

        rss_hf = rss_conf->rss_hf;
        cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
                rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
                rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;

        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
                rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP;

        /* Propagate current RSS hash functions to caller */
        rss_conf->rss_hf = rss_hf;

        /* Reading the key size */
        rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);

        /* Reading the key byte by byte */
        for (i = 0; i < rss_conf->rss_key_len; i++) {
                key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
                memcpy(&rss_conf->rss_key[i], &key, 1);
        }

        return 0;
}
static int
nfp_net_rss_config_default(struct rte_eth_dev *dev)
{
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rss_conf rss_conf;
        struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
        uint16_t rx_queues = dev->data->nb_rx_queues;
        uint16_t queue;
        int i, j, ret;

        PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
                    rx_queues);

        nfp_reta_conf[0].mask = ~0x0;
        nfp_reta_conf[1].mask = ~0x0;

        queue = 0;
        for (i = 0; i < 0x40; i += 8) {
                for (j = i; j < (i + 8); j++) {
                        nfp_reta_conf[0].reta[j] = queue;
                        nfp_reta_conf[1].reta[j] = queue++;
                        queue %= rx_queues;
                }
        }
        ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
        if (ret != 0)
                return ret;

        dev_conf = &dev->data->dev_conf;
        if (!dev_conf) {
                PMD_DRV_LOG(INFO, "wrong rss conf");
                return -EINVAL;
        }
        rss_conf = dev_conf->rx_adv_conf.rss_conf;

        ret = nfp_net_rss_hash_write(dev, &rss_conf);

        return ret;
}
/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
        .dev_configure          = nfp_net_configure,
        .dev_start              = nfp_net_start,
        .dev_stop               = nfp_net_stop,
        .dev_set_link_up        = nfp_net_set_link_up,
        .dev_set_link_down      = nfp_net_set_link_down,
        .dev_close              = nfp_net_close,
        .promiscuous_enable     = nfp_net_promisc_enable,
        .promiscuous_disable    = nfp_net_promisc_disable,
        .link_update            = nfp_net_link_update,
        .stats_get              = nfp_net_stats_get,
        .stats_reset            = nfp_net_stats_reset,
        .dev_infos_get          = nfp_net_infos_get,
        .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
        .mtu_set                = nfp_net_dev_mtu_set,
        .mac_addr_set           = nfp_set_mac_addr,
        .vlan_offload_set       = nfp_net_vlan_offload_set,
        .reta_update            = nfp_net_reta_update,
        .reta_query             = nfp_net_reta_query,
        .rss_hash_update        = nfp_net_rss_hash_update,
        .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
        .rx_queue_setup         = nfp_net_rx_queue_setup,
        .rx_queue_release       = nfp_net_rx_queue_release,
        .tx_queue_setup         = nfp_net_tx_queue_setup,
        .tx_queue_release       = nfp_net_tx_queue_release,
        .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
        .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
};
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct nfp_pf_dev *pf_dev;
        struct nfp_net_hw *hw;

        uint64_t tx_bar_off = 0, rx_bar_off = 0;
        uint32_t start_q;
        int stride = 4;
        int port = 0;
        int err;

        PMD_INIT_FUNC_TRACE();

        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* Use backpointer here to the PF of this eth_dev */
        pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);

        /* NFP can not handle DMA addresses requiring more than 40 bits */
        if (rte_mem_check_dma_mask(40)) {
                RTE_LOG(ERR, PMD, "device %s can not be used:",
                        pci_dev->device.name);
                RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
                return -ENODEV;
        }

        if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
            (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
                port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
                if (port < 0 || port > 7) {
                        PMD_DRV_LOG(ERR, "Port value is wrong");
                        return -ENODEV;
                }

                /*
                 * Use PF array of physical ports to get pointer to
                 * this specific port.
                 */
                hw = pf_dev->ports[port];

                PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
                             "NFP internal port number: %d",
                             port, hw->nfp_idx);
        } else {
                hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        }

        eth_dev->dev_ops = &nfp_net_eth_dev_ops;
        eth_dev->rx_queue_count = nfp_net_rx_queue_count;
        eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
        eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

        PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
                     pci_dev->id.vendor_id, pci_dev->id.device_id,
                     pci_dev->addr.domain, pci_dev->addr.bus,
                     pci_dev->addr.devid, pci_dev->addr.function);

        hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
        if (hw->ctrl_bar == NULL) {
                PMD_DRV_LOG(ERR,
                            "hw->ctrl_bar is NULL. BAR0 not configured");
                return -ENODEV;
        }

        if (hw->is_phyport) {
                if (port == 0) {
                        hw->ctrl_bar = pf_dev->ctrl_bar;
                } else {
                        if (!pf_dev->ctrl_bar)
                                return -ENODEV;
                        /*
                         * Use port offset in pf ctrl_bar for this
                         * port's control bar.
                         */
                        hw->ctrl_bar = pf_dev->ctrl_bar +
                                       (port * NFP_PF_CSR_SLICE_SIZE);
                }
        }
1783 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
1785 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
1786 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
1788 /* Work out where in the BAR the queues start. */
1789 switch (pci_dev->id.device_id) {
1790 case PCI_DEVICE_ID_NFP4000_PF_NIC:
1791 case PCI_DEVICE_ID_NFP6000_PF_NIC:
1792 case PCI_DEVICE_ID_NFP6000_VF_NIC:
1793 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
1794 tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
1795 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
1796 rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
1799 PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
1801 goto dev_err_ctrl_map;
1804 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
1805 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
1807 if (hw->is_phyport) {
1808 hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
1809 hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
1810 eth_dev->data->dev_private = hw;
1812 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
1814 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
1818 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
1819 hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
1821 nfp_net_cfg_queue_setup(hw);
1823 /* Get some of the read-only fields from the config BAR */
1824 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
1825 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
1826 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
1827 hw->mtu = RTE_ETHER_MTU;
1829 /* VLAN insertion is incompatible with LSOv2 */
1830 if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
1831 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
1833 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
1834 hw->rx_offset = NFP_NET_RX_OFFSET;
1836 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
1838 PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
1839 NFD_CFG_MAJOR_VERSION_of(hw->ver),
1840 NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
1842 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
1843 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
1844 hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
1845 hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
1846 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
1847 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
1848 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
1849 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
1850 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
1851 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
1852 hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
1853 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
1854 hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
1855 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
1856 hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");
        hw->ctrl = 0;

        hw->stride_rx = stride;
        hw->stride_tx = stride;

        PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
                     hw->max_rx_queues, hw->max_tx_queues);

        /* Initializing spinlock for reconfigs */
        rte_spinlock_init(&hw->reconfig_lock);

        /* Allocating memory for mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
                                               RTE_ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
                err = -ENOMEM;
                goto dev_err_queues_map;
        }

        if (hw->is_phyport) {
                nfp_net_pf_read_mac(pf_dev, port);
                nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
        } else {
                nfp_net_vf_read_mac(hw);
        }

        if (!rte_is_valid_assigned_ether_addr(
                    (struct rte_ether_addr *)&hw->mac_addr)) {
                PMD_INIT_LOG(INFO, "Using random mac address for port %d",
                             port);
                /* Using random mac addresses for VFs */
                rte_eth_random_addr(&hw->mac_addr[0]);
                nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
        }

        /* Copying mac address to DPDK eth_dev struct */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
                            &eth_dev->data->mac_addrs[0]);

        if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
                eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
                     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id,
                     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
                     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                /* Registering LSC interrupt handler */
                rte_intr_callback_register(&pci_dev->intr_handle,
                                           nfp_net_dev_interrupt_handler,
                                           (void *)eth_dev);
                /* Telling the firmware about the LSC interrupt entry */
                nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
                /* Recording current stats counters values */
                nfp_net_stats_reset(eth_dev);
        }

        return 0;

dev_err_queues_map:
        nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
        nfp_cpp_area_free(hw->ctrl_area);

        return err;
}
#define NFP_CPP_MEMIO_BOUNDARY (1 << 20)

/*
 * Serving a write request to NFP from host programs. The request
 * sends the write size and the CPP target. The bridge makes use
 * of the CPP interface handler configured by the PMD setup.
 */
static int
nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
{
        struct nfp_cpp_area *area;
        off_t offset, nfp_offset;
        uint32_t cpp_id, pos, len;
        uint32_t tmpbuf[16];
        size_t count, curlen, totlen = 0;
        int err = 0;

        PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
                    sizeof(off_t), sizeof(size_t));

        /* Reading the count param */
        err = recv(sockfd, &count, sizeof(off_t), 0);
        if (err != sizeof(off_t))
                return -EINVAL;

        curlen = count;

        /* Reading the offset param */
        err = recv(sockfd, &offset, sizeof(off_t), 0);
        if (err != sizeof(off_t))
                return -EINVAL;

        /* Obtain target's CPP ID and offset in target */
        cpp_id = (offset >> 40) << 8;
        nfp_offset = offset & ((1ull << 40) - 1);

        PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
                    nfp_offset);
        PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
                    cpp_id, nfp_offset);

        /* Adjust length if not aligned */
        if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
            (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
                curlen = NFP_CPP_MEMIO_BOUNDARY -
                         (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
        }

        while (count > 0) {
                /* configure a CPP PCIe2CPP BAR for mapping the CPP target */
                area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
                                                    nfp_offset, curlen);
                if (!area) {
                        RTE_LOG(ERR, PMD, "%s: area alloc fail\n", __func__);
                        return -EIO;
                }

                /* mapping the target */
                err = nfp_cpp_area_acquire(area);
                if (err < 0) {
                        RTE_LOG(ERR, PMD, "area acquire failed\n");
                        nfp_cpp_area_free(area);
                        return -EIO;
                }

                for (pos = 0; pos < curlen; pos += len) {
                        len = curlen - pos;
                        if (len > sizeof(tmpbuf))
                                len = sizeof(tmpbuf);

                        PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu\n", __func__,
                                    len, count);
                        err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
                        if (err != (int)len) {
                                RTE_LOG(ERR, PMD,
                                        "%s: error when receiving, %d of %zu\n",
                                        __func__, err, count);
                                nfp_cpp_area_release(area);
                                nfp_cpp_area_free(area);
                                return -EIO;
                        }
                        err = nfp_cpp_area_write(area, pos, tmpbuf, len);
                        if (err < 0) {
                                RTE_LOG(ERR, PMD, "nfp_cpp_area_write error\n");
                                nfp_cpp_area_release(area);
                                nfp_cpp_area_free(area);
                                return -EIO;
                        }
                }

                nfp_offset += pos;
                totlen += pos;
                nfp_cpp_area_release(area);
                nfp_cpp_area_free(area);

                count -= pos;
                curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
                         NFP_CPP_MEMIO_BOUNDARY : count;
        }

        return 0;
}
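/*
 * Worked example (illustrative): the 64-bit offset received from the client
 * multiplexes the CPP target and the address. For offset 0x0120000000001000,
 * the code above derives cpp_id = (offset >> 40) << 8 = 0x01200000 and
 * nfp_offset = offset & ((1ull << 40) - 1) = 0x1000.
 */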
/*
 * Serving a read request to NFP from host programs. The request
 * sends the read size and the CPP target. The bridge makes use
 * of the CPP interface handler configured by the PMD setup. The read
 * data is sent to the requester using the same socket.
 */
static int
nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
{
        struct nfp_cpp_area *area;
        off_t offset, nfp_offset;
        uint32_t cpp_id, pos, len;
        uint32_t tmpbuf[16];
        size_t count, curlen, totlen = 0;
        int err = 0;

        PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
                    sizeof(off_t), sizeof(size_t));

        /* Reading the count param */
        err = recv(sockfd, &count, sizeof(off_t), 0);
        if (err != sizeof(off_t))
                return -EINVAL;

        curlen = count;

        /* Reading the offset param */
        err = recv(sockfd, &offset, sizeof(off_t), 0);
        if (err != sizeof(off_t))
                return -EINVAL;

        /* Obtain target's CPP ID and offset in target */
        cpp_id = (offset >> 40) << 8;
        nfp_offset = offset & ((1ull << 40) - 1);

        PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
                    nfp_offset);
        PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
                    cpp_id, nfp_offset);

        /* Adjust length if not aligned */
        if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
            (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
                curlen = NFP_CPP_MEMIO_BOUNDARY -
                         (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
        }

        while (count > 0) {
                area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
                                                    nfp_offset, curlen);
                if (!area) {
                        RTE_LOG(ERR, PMD, "%s: area alloc failed\n", __func__);
                        return -EIO;
                }

                err = nfp_cpp_area_acquire(area);
                if (err < 0) {
                        RTE_LOG(ERR, PMD, "area acquire failed\n");
                        nfp_cpp_area_free(area);
                        return -EIO;
                }

                for (pos = 0; pos < curlen; pos += len) {
                        len = curlen - pos;
                        if (len > sizeof(tmpbuf))
                                len = sizeof(tmpbuf);

                        err = nfp_cpp_area_read(area, pos, tmpbuf, len);
                        if (err < 0) {
                                RTE_LOG(ERR, PMD, "nfp_cpp_area_read error\n");
                                nfp_cpp_area_release(area);
                                nfp_cpp_area_free(area);
                                return -EIO;
                        }
                        PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu\n", __func__,
                                    len, count);

                        err = send(sockfd, tmpbuf, len, 0);
                        if (err != (int)len) {
                                RTE_LOG(ERR, PMD,
                                        "%s: error when sending: %d of %zu\n",
                                        __func__, err, count);
                                nfp_cpp_area_release(area);
                                nfp_cpp_area_free(area);
                                return -EIO;
                        }
                }

                nfp_offset += pos;
                totlen += pos;
                nfp_cpp_area_release(area);
                nfp_cpp_area_free(area);

                count -= pos;
                curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
                         NFP_CPP_MEMIO_BOUNDARY : count;
        }
        return 0;
}
#define NFP_IOCTL 'n'
#define NFP_IOCTL_CPP_IDENTIFICATION _IOW(NFP_IOCTL, 0x8f, uint32_t)
/*
 * Serving an ioctl command from host NFP tools. This usually goes to
 * a kernel char driver, but that driver is not available when the PF is
 * bound to the PMD. Currently just one ioctl command is served and it
 * does not require any CPP access at all.
 */
static int
nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp)
{
        uint32_t cmd, ident_size, tmp;
        int err;

        /* Reading now the IOCTL command */
        err = recv(sockfd, &cmd, 4, 0);
        if (err != 4) {
                RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
                return -EIO;
        }

        /* Only supporting NFP_IOCTL_CPP_IDENTIFICATION */
        if (cmd != NFP_IOCTL_CPP_IDENTIFICATION) {
                RTE_LOG(ERR, PMD, "%s: unknown cmd %d\n", __func__, cmd);
                return -EINVAL;
        }

        err = recv(sockfd, &ident_size, 4, 0);
        if (err != 4) {
                RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
                return -EIO;
        }

        tmp = nfp_cpp_model(cpp);

        PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x\n", __func__, tmp);

        err = send(sockfd, &tmp, 4, 0);
        if (err != 4) {
                RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
                return -EIO;
        }

        tmp = cpp->interface;

        PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x\n", __func__, tmp);

        err = send(sockfd, &tmp, 4, 0);
        if (err != 4) {
                RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
                return -EIO;
        }

        return 0;
}
#define NFP_BRIDGE_OP_READ      20
#define NFP_BRIDGE_OP_WRITE     30
#define NFP_BRIDGE_OP_IOCTL     40

/*
 * This is the code to be executed by a service core. The CPP bridge
 * interface is based on a unix socket: the read, write and ioctl requests
 * usually received by a kernel char driver are handled by the CPP bridge
 * instead. NFP host tools can be executed with a wrapper library and
 * LD_LIBRARY_PATH, completely unaware that the CPP bridge is standing in
 * for the NFP kernel char driver for CPP accesses.
 */
static int32_t
nfp_cpp_bridge_service_func(void *args)
{
        struct sockaddr address;
        struct nfp_cpp *cpp = args;
        int sockfd, datafd, op, ret;

        unlink("/tmp/nfp_cpp");
        sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
        if (sockfd < 0) {
                RTE_LOG(ERR, PMD, "%s: socket creation error. Service failed\n",
                        __func__);
                return -EIO;
        }

        memset(&address, 0, sizeof(struct sockaddr));

        address.sa_family = AF_UNIX;
        strcpy(address.sa_data, "/tmp/nfp_cpp");

        ret = bind(sockfd, (const struct sockaddr *)&address,
                   sizeof(struct sockaddr));
        if (ret < 0) {
                RTE_LOG(ERR, PMD, "%s: bind error (%d). Service failed\n",
                        __func__, errno);
                close(sockfd);
                return -EIO;
        }

        ret = listen(sockfd, 20);
        if (ret < 0) {
                RTE_LOG(ERR, PMD, "%s: listen error(%d). Service failed\n",
                        __func__, errno);
                close(sockfd);
                return -EIO;
        }

        while (1) {
                datafd = accept(sockfd, NULL, NULL);
                if (datafd < 0) {
                        RTE_LOG(ERR, PMD, "%s: accept call error (%d)\n",
                                __func__, errno);
                        RTE_LOG(ERR, PMD, "%s: service failed\n", __func__);
                        close(sockfd);
                        return -EIO;
                }

                while (1) {
                        ret = recv(datafd, &op, 4, 0);
                        if (ret <= 0) {
                                PMD_CPP_LOG(DEBUG, "%s: socket close\n",
                                            __func__);
                                break;
                        }

                        PMD_CPP_LOG(DEBUG, "%s: getting op %u\n", __func__, op);

                        if (op == NFP_BRIDGE_OP_READ)
                                nfp_cpp_bridge_serve_read(datafd, cpp);

                        if (op == NFP_BRIDGE_OP_WRITE)
                                nfp_cpp_bridge_serve_write(datafd, cpp);

                        if (op == NFP_BRIDGE_OP_IOCTL)
                                nfp_cpp_bridge_serve_ioctl(datafd, cpp);
                }
                close(datafd);
        }

        return 0;
}
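/*
 * Protocol sketch (illustrative, not a stable ABI): a client connects to
 * /tmp/nfp_cpp and sends a 4-byte op word (NFP_BRIDGE_OP_READ/WRITE/IOCTL).
 * For reads and writes it then sends two off_t values, size and target
 * offset, followed by sending (for writes) or receiving (for reads) the raw
 * data, mirroring what nfp_cpp_bridge_serve_read()/_write() expect above.
 */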
2278 #define DEFAULT_FW_PATH "/lib/firmware/netronome"

static int
nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
{
	struct nfp_cpp *cpp = nsp->cpp;
	void *fw_buf;
	char fw_name[125];
	char serial[40];
	size_t fsize;

	/* Look for the firmware files in order of priority */

	/* First try to find a firmware image specific to this device */
	snprintf(serial, sizeof(serial),
		"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
		cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
		cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
		cpp->interface & 0xff);
	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
		serial);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Then try the PCI name */
	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
		dev->device.name);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Finally try the card type and media */
	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
		return -ENOENT;
	}

load_fw:
	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
		fw_name, fsize);
	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	nfp_nsp_load_fw(nsp, fw_buf, fsize);
	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);
	return 0;
}
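
/*
 * Illustration of the resulting lookup order, with a made-up serial number
 * and PCI address:
 *
 *   1. /lib/firmware/netronome/serial-00-15-4d-00-00-01-01-ff.nffw
 *   2. /lib/firmware/netronome/pci-0000:02:00.0.nffw
 *   3. /lib/firmware/netronome/<card>, the per-card-type name built by
 *      nfp_fw_setup() below
 */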

static int
nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
	     struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
{
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;
	char card_desc[100];
	int err = 0;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
	if (nfp_fw_model) {
		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
			nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
		nfp_eth_table->count);
	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
		nfp_fw_model, nfp_eth_table->count,
		nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (!nsp) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(dev, nsp, card_desc);

	nfp_nsp_close(nsp);
	return err;
}
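
/*
 * Illustration: on a hypothetical 2-port 25G card whose assembly.partno
 * is "AMDA0099-0001", the snprintf above yields the card firmware name
 * "nic_AMDA0099-0001_2x25.nffw" (values illustrative only).
 */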

static int nfp_init_phyports(struct nfp_pf_dev *pf_dev)
{
	struct nfp_net_hw *hw;
	struct rte_eth_dev *eth_dev;
	struct nfp_eth_table *nfp_eth_table = NULL;
	int ret = 0;
	int i;

	nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
	if (!nfp_eth_table) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto error;
	}

	/* Loop through all physical ports on the PF */
	for (i = 0; i < pf_dev->total_phyports; i++) {
		const unsigned int numa_node = rte_socket_id();
		char port_name[RTE_ETH_NAME_MAX_LEN];

		snprintf(port_name, sizeof(port_name), "%s_port%d",
			pf_dev->pci_dev->device.name, i);

		/* Allocate an eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (!eth_dev) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private =
			rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
				RTE_CACHE_LINE_SIZE, numa_node);
		if (!eth_dev->data->dev_private) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}

		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* Add this device to the PF's array of physical ports */
		pf_dev->ports[i] = hw;

		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = i;
		hw->nfp_idx = nfp_eth_table->ports[i].index;
		hw->is_phyport = true;

		eth_dev->device = &pf_dev->pci_dev->device;

		/*
		 * The ctrl/tx/rx BAR mappings and the remaining init
		 * happen in nfp_net_init()
		 */
		ret = nfp_net_init(eth_dev);
		if (ret) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */

	goto eth_table_cleanup;

port_cleanup:
	for (i = 0; i < pf_dev->total_phyports; i++) {
		if (pf_dev->ports[i] && pf_dev->ports[i]->eth_dev) {
			struct rte_eth_dev *tmp_dev;
			tmp_dev = pf_dev->ports[i]->eth_dev;
			rte_eth_dev_release_port(tmp_dev);
			pf_dev->ports[i] = NULL;
		}
	}

eth_table_cleanup:
	free(nfp_eth_table);
error:
	return ret;
}
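
/*
 * For reference: with the PF at PCI address 0000:02:00.0 and two phyports,
 * the loop above registers ethdevs named "0000:02:00.0_port0" and
 * "0000:02:00.0_port1" (address illustrative only).
 */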

static void nfp_register_cpp_service(struct nfp_cpp *cpp)
{
	uint32_t cpp_service_id = 0;
	struct rte_service_spec service;

	memset(&service, 0, sizeof(struct rte_service_spec));
	snprintf(service.name, sizeof(service.name), "nfp_cpp_service");
	service.callback = nfp_cpp_bridge_service_func;
	service.callback_userdata = (void *)cpp;

	if (rte_service_component_register(&service, &cpp_service_id))
		RTE_LOG(WARNING, PMD,
			"NFP CPP bridge service register() failed\n");
	else
		RTE_LOG(DEBUG, PMD, "NFP CPP bridge service registered\n");
}
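
/*
 * For illustration only, not compiled into the driver: registering a
 * service component does not run it; the service must still be mapped to a
 * service lcore at runtime. A minimal sketch, assuming the id returned by
 * rte_service_component_register() above is made available to the caller
 * (here it stays local) and that service_lcore was reserved via EAL's
 * service core options; the helper name is hypothetical.
 */
#if 0
static int
nfp_cpp_service_start(uint32_t id, uint32_t service_lcore)
{
	/* Allow the component to be scheduled and mark the service runnable */
	rte_service_component_runstate_set(id, 1);
	rte_service_runstate_set(id, 1);

	/* Add the lcore as a service core, map the service onto it, start it */
	rte_service_lcore_add(service_lcore);
	rte_service_map_lcore_set(id, service_lcore, 1);
	return rte_service_lcore_start(service_lcore);
}
#endif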

static int nfp_pf_init(struct rte_pci_device *pci_dev)
{
	struct nfp_pf_dev *pf_dev = NULL;
	struct nfp_cpp *cpp;
	struct nfp_hwinfo *hwinfo;
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int total_ports;
	int ret = -ENODEV;
	int err;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO it could, by mistake, be used by
	 * two DPDK apps at once, and the UIO driver does nothing to prevent
	 * it. This could lead to serious problems when configuring the NFP
	 * CPP interface, so we tell the CPP init code to use a lock file if
	 * UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (!cpp) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		ret = -EIO;
		goto error;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (!hwinfo) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto error;
	}

	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (!nfp_eth_table) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (!sym_tbl) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (total_ports != (int)nfp_eth_table->count) {
		PMD_DRV_LOG(ERR, "Inconsistent number of ports");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	PMD_INIT_LOG(INFO, "Total physical ports: %d", total_ports);

	if (total_ports <= 0 || total_ports > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto sym_tbl_cleanup;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (!pf_dev) {
		ret = -ENOMEM;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->total_phyports = total_ports;

	if (total_ports > 1)
		pf_dev->multiport = true;

	pf_dev->pci_dev = pci_dev;

	/* Map the vNIC ctrl BARs through the firmware symbol table */
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
		pf_dev->total_phyports * 32768, &pf_dev->ctrl_area);
	if (!pf_dev->ctrl_bar) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_bar0");
		ret = -EIO;
		goto pf_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Configure access to the tx/rx vNIC BARs */
	pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
		NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
		&pf_dev->hwqueues_area);
	if (!pf_dev->hw_queues) {
		PMD_INIT_LOG(ERR, "nfp_cpp_map_area fails for net.qc");
		ret = -EIO;
		goto ctrl_area_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);

	/*
	 * Initialize and prep the physical ports now.
	 * This loops through all physical ports on the PF.
	 */
	ret = nfp_init_phyports(pf_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Could not create physical ports");
		goto hwqueues_cleanup;
	}

	/* Register the CPP bridge service here for primary use */
	nfp_register_cpp_service(pf_dev->cpp);

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_free(pf_dev->hwqueues_area);
ctrl_area_cleanup:
	nfp_cpp_area_free(pf_dev->ctrl_area);
pf_cleanup:
	rte_free(pf_dev);
sym_tbl_cleanup:
	free(sym_tbl);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
error:
	return ret;
}
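
/*
 * Note on the "_pf0_net_bar0" mapping above: the area is sized at 32768
 * bytes (32K) per phyport, i.e. one config BAR slice per vNIC, so port i's
 * control BAR is expected at ctrl_bar + i * 32768. The per-port wiring
 * itself happens in nfp_net_init(), which performs the ctrl/tx/rx BAR
 * mappings for each port.
 */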

/*
 * When attaching to the NFP4000/6000 PF on a secondary process there
 * is no need to initialize the PF again. Only minimal work is required
 * here.
 */
static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	struct nfp_cpp *cpp;
	struct nfp_rtsym_table *sym_tbl;
	int total_ports;
	int i;
	int err = 0;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO it could, by mistake, be used by
	 * two DPDK apps at once, and the UIO driver does nothing to prevent
	 * it. This could lead to serious problems when configuring the NFP
	 * CPP interface, so we tell the CPP init code to use a lock file if
	 * UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (!cpp) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	/*
	 * We don't have access to the PF created in the primary process
	 * here, so we have to read the number of ports from firmware.
	 */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (!sym_tbl) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
		return -EIO;
	}

	total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);

	for (i = 0; i < total_ports; i++) {
		struct rte_eth_dev *eth_dev;
		char port_name[RTE_ETH_NAME_MAX_LEN];

		snprintf(port_name, sizeof(port_name), "%s_port%d",
			pci_dev->device.name, i);

		PMD_DRV_LOG(DEBUG, "Secondary attaching to port %s",
			port_name);
		eth_dev = rte_eth_dev_attach_secondary(port_name);
		if (!eth_dev) {
			RTE_LOG(ERR, EAL, "secondary process attach failed, "
				"ethdev doesn't exist\n");
			return -ENODEV;
		}

		eth_dev->process_private = cpp;
		eth_dev->dev_ops = &nfp_net_eth_dev_ops;
		eth_dev->rx_queue_count = nfp_net_rx_queue_count;
		eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
		eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
		rte_eth_dev_probing_finish(eth_dev);
	}

	/* Register the CPP bridge service for the secondary too */
	nfp_register_cpp_service(cpp);

	return 0;
}

static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			    struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return nfp_pf_init(dev);
	else
		return nfp_pf_secondary_init(dev);
}

static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		.vendor_id = 0, /* sentinel */
	},
};

static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0, /* sentinel */
	},
};

static int nfp_pci_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	uint16_t port_id;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC ||
	    pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) {
		/* Free up all physical ports under the PF */
		RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
			rte_eth_dev_close(port_id);
		/*
		 * Ports can be closed and freed, but hotplugging is not
		 * currently supported.
		 */
		return -ENOTSUP;
	}

	/* VF cleanup, just free private port data */
	return nfp_net_close(eth_dev);
}

static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct nfp_net_adapter), nfp_net_init);
}

static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};

static struct rte_pci_driver rte_nfp_net_vf_pmd = {
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_nfp_pci_probe,
	.remove = eth_nfp_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
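
/*
 * Usage note, for illustration only: per the kernel module dependencies
 * declared above, the PF/VF must be bound to one of those modules before
 * probing, e.g. with the standard DPDK binding tool (PCI address
 * illustrative):
 *
 *   dpdk-devbind.py --bind=vfio-pci 0000:02:00.0
 */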

RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE);

/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */