1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2014-2021 Netronome Systems, Inc.
5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
9 * vim:shiftwidth=8:noexpandtab
11 * @file dpdk/pmd/nfp_ethdev.c
13 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
16 #include <rte_common.h>
17 #include <ethdev_driver.h>
18 #include <ethdev_pci.h>
20 #include <rte_ether.h>
21 #include <rte_malloc.h>
22 #include <rte_memzone.h>
23 #include <rte_mempool.h>
24 #include <rte_service_component.h>
25 #include "eal_firmware.h"
27 #include "nfpcore/nfp_cpp.h"
28 #include "nfpcore/nfp_nffw.h"
29 #include "nfpcore/nfp_hwinfo.h"
30 #include "nfpcore/nfp_mip.h"
31 #include "nfpcore/nfp_rtsym.h"
32 #include "nfpcore/nfp_nsp.h"
34 #include "nfp_common.h"
38 #include "nfp_cpp_bridge.h"
/*
 * Forward declarations for the PF vNIC ethdev ops and the PCI
 * probe/remove entry points implemented below.
 */
41 static int nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port);
42 static int nfp_net_start(struct rte_eth_dev *dev);
43 static int nfp_net_stop(struct rte_eth_dev *dev);
44 static int nfp_net_set_link_up(struct rte_eth_dev *dev);
45 static int nfp_net_set_link_down(struct rte_eth_dev *dev);
46 static int nfp_net_close(struct rte_eth_dev *dev);
47 static int nfp_net_init(struct rte_eth_dev *eth_dev);
48 static int nfp_fw_upload(struct rte_pci_device *dev,
49 struct nfp_nsp *nsp, char *card);
50 static int nfp_fw_setup(struct rte_pci_device *dev,
52 struct nfp_eth_table *nfp_eth_table,
53 struct nfp_hwinfo *hwinfo);
54 static int nfp_init_phyports(struct nfp_pf_dev *pf_dev);
55 static int nfp_pf_init(struct rte_pci_device *pci_dev);
56 static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev);
57 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
58 struct rte_pci_device *dev);
59 static int nfp_pci_uninit(struct rte_eth_dev *eth_dev);
60 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev);
/*
 * Read the MAC address of one physical port from the NFP ethernet table
 * and copy it into that port's nfp_net_hw state (hw->mac_addr).
 *
 * NOTE(review): this extract is incomplete (lines are missing); in the
 * visible lines the nfp_eth_read_ports() result is dereferenced without
 * a NULL check and without a matching free — confirm against the full
 * source.
 */
63 nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port)
65 struct nfp_eth_table *nfp_eth_table;
66 struct nfp_net_hw *hw = NULL;
68 /* Grab a pointer to the correct physical port */
69 hw = pf_dev->ports[port];
71 nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
73 nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
74 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
/*
 * .dev_start callback: enable queues, optionally wire up per-queue RX
 * interrupts, program offload/RSS control bits into the config BAR,
 * allocate RX freelist mbufs and bring the physical link up.
 *
 * NOTE(review): extract is incomplete — error-path returns/labels between
 * the visible lines are missing here.
 */
81 nfp_net_start(struct rte_eth_dev *dev)
83 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
84 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
85 uint32_t new_ctrl, update = 0;
86 struct nfp_net_hw *hw;
87 struct nfp_pf_dev *pf_dev;
88 struct rte_eth_conf *dev_conf;
89 struct rte_eth_rxmode *rxmode;
93 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
94 pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
96 PMD_INIT_LOG(DEBUG, "Start");
98 /* Disabling queues just in case... */
99 nfp_net_disable_queues(dev);
101 /* Enabling the required queues in the device */
102 nfp_net_enable_queues(dev);
104 /* check and configure queue intr-vector mapping */
105 if (dev->data->dev_conf.intr_conf.rxq != 0) {
106 if (pf_dev->multiport) {
107 PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
108 "with NFP multiport PF");
111 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
113 * Better not to share LSC with RX interrupts.
114 * Unregistering LSC interrupt handler
116 rte_intr_callback_unregister(&pci_dev->intr_handle,
117 nfp_net_dev_interrupt_handler, (void *)dev);
119 if (dev->data->nb_rx_queues > 1) {
120 PMD_INIT_LOG(ERR, "PMD rx interrupt only "
121 "supports 1 queue with UIO");
125 intr_vector = dev->data->nb_rx_queues;
126 if (rte_intr_efd_enable(intr_handle, intr_vector))
129 nfp_configure_rx_interrupt(dev, intr_handle);
130 update = NFP_NET_CFG_UPDATE_MSIX;
133 rte_intr_enable(intr_handle);
135 new_ctrl = nfp_check_offloads(dev);
137 /* Writing configuration parameters in the device */
138 nfp_net_params_setup(hw);
140 dev_conf = &dev->data->dev_conf;
141 rxmode = &dev_conf->rxmode;
143 if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
144 nfp_net_rss_config_default(dev);
145 update |= NFP_NET_CFG_UPDATE_RSS;
146 new_ctrl |= NFP_NET_CFG_CTRL_RSS;
150 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
152 update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
154 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
155 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
157 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
158 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
162 * Allocating rte mbufs for configured rx queues.
163 * This requires queues being enabled before
165 if (nfp_net_rx_freelist_setup(dev) < 0) {
170 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
171 /* Configure the physical port up */
172 nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
174 nfp_eth_set_configured(dev->process_private,
183 * An error returned by this function should mean the app
184 * exiting and then the system releasing all the memory
185 * allocated even memory coming from hugepages.
187 * The device could be enabled at this point with some queues
188 * ready for getting packets. This is true if the call to
189 * nfp_net_rx_freelist_setup() succeeds for some queues but
190 * fails for subsequent queues.
192 * This should make the app exiting but better if we tell the
195 nfp_net_disable_queues(dev);
200 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
/*
 * .dev_stop callback: quiesce all TX/RX queues and take the physical
 * port administratively down (primary process writes via hw->cpp; the
 * secondary path visible at the bottom uses dev->process_private).
 */
202 nfp_net_stop(struct rte_eth_dev *dev)
205 struct nfp_net_hw *hw;
206 struct nfp_net_txq *this_tx_q;
207 struct nfp_net_rxq *this_rx_q;
209 PMD_INIT_LOG(DEBUG, "Stop");
211 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
213 nfp_net_disable_queues(dev);
216 for (i = 0; i < dev->data->nb_tx_queues; i++) {
217 this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
218 nfp_net_reset_tx_queue(this_tx_q);
221 for (i = 0; i < dev->data->nb_rx_queues; i++) {
222 this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
223 nfp_net_reset_rx_queue(this_rx_q);
226 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
227 /* Configure the physical port down */
228 nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
230 nfp_eth_set_configured(dev->process_private,
236 /* Set the link up. */
/*
 * .dev_set_link_up callback: enable the physical port (state 1).
 * Primary process talks to the NSP through hw->cpp; a secondary process
 * uses the per-process CPP handle stored in dev->process_private.
 */
238 nfp_net_set_link_up(struct rte_eth_dev *dev)
240 struct nfp_net_hw *hw;
242 PMD_DRV_LOG(DEBUG, "Set link up");
244 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
246 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
247 /* Configure the physical port up (fixed copy-paste: this is the link-up path) */
248 return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
250 return nfp_eth_set_configured(dev->process_private,
254 /* Set the link down. */
/*
 * .dev_set_link_down callback: disable the physical port (state 0).
 * Mirrors nfp_net_set_link_up() with the opposite configured state.
 */
256 nfp_net_set_link_down(struct rte_eth_dev *dev)
258 struct nfp_net_hw *hw;
260 PMD_DRV_LOG(DEBUG, "Set link down");
262 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
264 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
265 /* Configure the physical port down */
266 return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
268 return nfp_eth_set_configured(dev->process_private,
272 /* Reset and stop device. The device can not be restarted. */
/*
 * .dev_close callback (primary process only): quiesce queues, detach this
 * port from the PF, and — only once every sibling physical port has also
 * been closed — release the shared PF resources (CPP areas, hwinfo,
 * symbol table, CPP handle) and tear down the LSC interrupt.
 */
274 nfp_net_close(struct rte_eth_dev *dev)
276 struct nfp_net_hw *hw;
277 struct rte_pci_device *pci_dev;
278 struct nfp_pf_dev *pf_dev;
279 struct nfp_net_txq *this_tx_q;
280 struct nfp_net_rxq *this_rx_q;
283 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
286 PMD_INIT_LOG(DEBUG, "Close");
288 pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
289 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
290 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
293 * We assume that the DPDK application is stopping all the
294 * threads/queues before calling the device close function.
297 nfp_net_disable_queues(dev);
300 for (i = 0; i < dev->data->nb_tx_queues; i++) {
301 this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
302 nfp_net_reset_tx_queue(this_tx_q);
305 for (i = 0; i < dev->data->nb_rx_queues; i++) {
306 this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
307 nfp_net_reset_rx_queue(this_rx_q);
310 /* Only free PF resources after all physical ports have been closed */
311 /* Mark this port as unused and free device priv resources*/
312 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
313 pf_dev->ports[hw->idx] = NULL;
314 rte_eth_dev_release_port(dev);
316 for (i = 0; i < pf_dev->total_phyports; i++) {
317 /* Check to see if ports are still in use */
318 if (pf_dev->ports[i])
322 /* Now it is safe to free all PF resources */
323 PMD_INIT_LOG(INFO, "Freeing PF resources");
324 nfp_cpp_area_free(pf_dev->ctrl_area);
325 nfp_cpp_area_free(pf_dev->hwqueues_area);
326 free(pf_dev->hwinfo);
327 free(pf_dev->sym_tbl);
328 nfp_cpp_free(pf_dev->cpp);
331 rte_intr_disable(&pci_dev->intr_handle);
333 /* unregister callback func from eal lib */
334 rte_intr_callback_unregister(&pci_dev->intr_handle,
335 nfp_net_dev_interrupt_handler,
339 * The ixgbe PMD driver disables the pcie master on the
340 * device. The i40e does not...
346 /* Initialise and register driver with DPDK Application */
/*
 * ethdev ops table shared by all PF physical ports; start/stop/link/close
 * are defined in this file, the remaining callbacks come from nfp_common.
 */
347 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
348 .dev_configure = nfp_net_configure,
349 .dev_start = nfp_net_start,
350 .dev_stop = nfp_net_stop,
351 .dev_set_link_up = nfp_net_set_link_up,
352 .dev_set_link_down = nfp_net_set_link_down,
353 .dev_close = nfp_net_close,
354 .promiscuous_enable = nfp_net_promisc_enable,
355 .promiscuous_disable = nfp_net_promisc_disable,
356 .link_update = nfp_net_link_update,
357 .stats_get = nfp_net_stats_get,
358 .stats_reset = nfp_net_stats_reset,
359 .dev_infos_get = nfp_net_infos_get,
360 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
361 .mtu_set = nfp_net_dev_mtu_set,
362 .mac_addr_set = nfp_set_mac_addr,
363 .vlan_offload_set = nfp_net_vlan_offload_set,
364 .reta_update = nfp_net_reta_update,
365 .reta_query = nfp_net_reta_query,
366 .rss_hash_update = nfp_net_rss_hash_update,
367 .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
368 .rx_queue_setup = nfp_net_rx_queue_setup,
369 .rx_queue_release = nfp_net_rx_queue_release,
370 .tx_queue_setup = nfp_net_tx_queue_setup,
371 .tx_queue_release = nfp_net_tx_queue_release,
372 .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
373 .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
/*
 * Per-port init called from nfp_init_phyports(): validate the DMA mask
 * and port index, wire up the ethdev ops/burst functions, map this
 * port's slice of the PF ctrl BAR and queue BARs, read capabilities from
 * the config BAR, set up the MAC address and register the LSC interrupt.
 *
 * NOTE(review): extract is incomplete — returns, closing braces and some
 * error paths between the visible lines are missing here.
 */
377 nfp_net_init(struct rte_eth_dev *eth_dev)
379 struct rte_pci_device *pci_dev;
380 struct nfp_pf_dev *pf_dev;
381 struct nfp_net_hw *hw;
382 struct rte_ether_addr *tmp_ether_addr;
384 uint64_t tx_bar_off = 0, rx_bar_off = 0;
390 PMD_INIT_FUNC_TRACE();
392 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
394 /* Use backpointer here to the PF of this eth_dev */
395 pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);
397 /* NFP can not handle DMA addresses requiring more than 40 bits */
398 if (rte_mem_check_dma_mask(40)) {
399 RTE_LOG(ERR, PMD, "device %s can not be used:",
400 pci_dev->device.name);
401 RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
405 port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
406 if (port < 0 || port > 7) {
407 PMD_DRV_LOG(ERR, "Port value is wrong");
411 /* Use PF array of physical ports to get pointer to
414 hw = pf_dev->ports[port];
416 PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
417 "NFP internal port number: %d",
420 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
421 eth_dev->rx_queue_count = nfp_net_rx_queue_count;
422 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
423 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
425 /* For secondary processes, the primary has done all the work */
426 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
429 rte_eth_copy_pci_info(eth_dev, pci_dev);
431 hw->device_id = pci_dev->id.device_id;
432 hw->vendor_id = pci_dev->id.vendor_id;
433 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
434 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
436 PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
437 pci_dev->id.vendor_id, pci_dev->id.device_id,
438 pci_dev->addr.domain, pci_dev->addr.bus,
439 pci_dev->addr.devid, pci_dev->addr.function);
441 hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
442 if (hw->ctrl_bar == NULL) {
444 "hw->ctrl_bar is NULL. BAR0 not configured");
449 hw->ctrl_bar = pf_dev->ctrl_bar;
451 if (!pf_dev->ctrl_bar)
453 /* Use port offset in pf ctrl_bar for this
456 hw->ctrl_bar = pf_dev->ctrl_bar +
457 (port * NFP_PF_CSR_SLICE_SIZE);
460 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
462 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
463 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
465 /* Work out where in the BAR the queues start. */
466 switch (pci_dev->id.device_id) {
467 case PCI_DEVICE_ID_NFP4000_PF_NIC:
468 case PCI_DEVICE_ID_NFP6000_PF_NIC:
469 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
470 tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
471 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
472 rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
475 PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
477 goto dev_err_ctrl_map;
480 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
481 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
483 hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
484 hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
485 eth_dev->data->dev_private = hw;
487 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
488 hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
490 nfp_net_cfg_queue_setup(hw);
492 /* Get some of the read-only fields from the config BAR */
493 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
494 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
495 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
496 hw->mtu = RTE_ETHER_MTU;
498 /* VLAN insertion is incompatible with LSOv2 */
499 if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
500 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
502 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
503 hw->rx_offset = NFP_NET_RX_OFFSET;
505 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
507 PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
508 NFD_CFG_MAJOR_VERSION_of(hw->ver),
509 NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
511 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
512 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
513 hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
514 hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
515 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
516 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
517 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
518 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
519 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
520 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
521 hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
522 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
523 hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
524 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
525 hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");
529 hw->stride_rx = stride;
530 hw->stride_tx = stride;
532 PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
533 hw->max_rx_queues, hw->max_tx_queues);
535 /* Initializing spinlock for reconfigs */
536 rte_spinlock_init(&hw->reconfig_lock);
538 /* Allocating memory for mac addr */
539 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
540 RTE_ETHER_ADDR_LEN, 0);
541 if (eth_dev->data->mac_addrs == NULL) {
542 PMD_INIT_LOG(ERR, "Failed to space for MAC address");
544 goto dev_err_queues_map;
547 nfp_net_pf_read_mac(pf_dev, port);
548 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
550 tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
551 if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
552 PMD_INIT_LOG(INFO, "Using random mac address for port %d",
554 /* Using random mac addresses for VFs */
555 rte_eth_random_addr(&hw->mac_addr[0]);
556 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
559 /* Copying mac address to DPDK eth_dev struct */
560 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
/* NOTE(review): "ð_dev" below is mojibake — almost certainly
 * "&eth_dev" in the real source; restore from upstream. */
561 ð_dev->data->mac_addrs[0]);
563 if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
564 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
566 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
568 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
569 "mac=" RTE_ETHER_ADDR_PRT_FMT,
570 eth_dev->data->port_id, pci_dev->id.vendor_id,
571 pci_dev->id.device_id,
572 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
573 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
575 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
576 /* Registering LSC interrupt handler */
577 rte_intr_callback_register(&pci_dev->intr_handle,
578 nfp_net_dev_interrupt_handler,
580 /* Telling the firmware about the LSC interrupt entry */
581 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
582 /* Recording current stats counters values */
583 nfp_net_stats_reset(eth_dev);
589 nfp_cpp_area_free(hw->hwqueues_area);
591 nfp_cpp_area_free(hw->ctrl_area);
596 #define DEFAULT_FW_PATH "/lib/firmware/netronome"
/*
 * Locate a firmware image and load it through the NSP. Search order:
 *   1. serial-<serial>.nffw  (device-specific, by serial + interface)
 *   2. pci-<name>.nffw       (by PCI address)
 *   3. <card>.nffw           (by card type/media, built by caller)
 *
 * NOTE(review): in the visible lines the nfp_nsp_load_fw() result is not
 * checked and fw_buf is not freed — confirm against the full source.
 */
599 nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
601 struct nfp_cpp *cpp = nsp->cpp;
607 /* Looking for firmware file in order of priority */
609 /* First try to find a firmware image specific for this device */
610 snprintf(serial, sizeof(serial),
611 "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
612 cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
613 cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
614 cpp->interface & 0xff);
616 snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
619 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
620 if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
622 /* Then try the PCI name */
623 snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
626 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
627 if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
630 /* Finally try the card type and media */
631 snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
632 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
633 if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
634 PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
639 PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
641 PMD_DRV_LOG(INFO, "Uploading the firmware ...");
642 nfp_nsp_load_fw(nsp, fw_buf, fsize);
643 PMD_DRV_LOG(INFO, "Done");
/*
 * Validate the hwinfo model and ethernet-port table, build the expected
 * firmware file name ("nic_<model>_<count>x<speed-Gb>.nffw"), then open
 * the NSP, soft-reset the device and upload the firmware.
 */
651 nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
652 struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
655 const char *nfp_fw_model;
659 nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
662 PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
664 PMD_DRV_LOG(ERR, "firmware model NOT found");
/* Sanity-check the port count reported by the NFP eth table (1..8). */
668 if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
669 PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
670 nfp_eth_table->count);
674 PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
675 nfp_eth_table->count);
677 PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
/* Firmware is selected by port 0's speed; speed is in Mb/s here. */
679 snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
680 nfp_fw_model, nfp_eth_table->count,
681 nfp_eth_table->ports[0].speed / 1000);
683 nsp = nfp_nsp_open(cpp);
685 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
689 nfp_nsp_device_soft_reset(nsp);
690 err = nfp_fw_upload(dev, nsp, card_desc);
/*
 * Allocate and initialise one rte_eth_dev per physical port on the PF.
 * On per-port init failure, the visible cleanup loop at the bottom
 * releases every port created so far.
 */
696 static int nfp_init_phyports(struct nfp_pf_dev *pf_dev)
698 struct nfp_net_hw *hw;
699 struct rte_eth_dev *eth_dev;
700 struct nfp_eth_table *nfp_eth_table = NULL;
704 nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
705 if (!nfp_eth_table) {
706 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
711 /* Loop through all physical ports on PF */
712 for (i = 0; i < pf_dev->total_phyports; i++) {
713 const unsigned int numa_node = rte_socket_id();
714 char port_name[RTE_ETH_NAME_MAX_LEN];
716 snprintf(port_name, sizeof(port_name), "%s_port%d",
717 pf_dev->pci_dev->device.name, i);
719 /* Allocate a eth_dev for this phyport */
720 eth_dev = rte_eth_dev_allocate(port_name);
726 /* Allocate memory for this phyport */
727 eth_dev->data->dev_private =
728 rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
729 RTE_CACHE_LINE_SIZE, numa_node);
730 if (!eth_dev->data->dev_private) {
732 rte_eth_dev_release_port(eth_dev);
736 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
738 /* Add this device to the PF's array of physical ports */
739 pf_dev->ports[i] = hw;
742 hw->cpp = pf_dev->cpp;
743 hw->eth_dev = eth_dev;
745 hw->nfp_idx = nfp_eth_table->ports[i].index;
746 hw->is_phyport = true;
748 eth_dev->device = &pf_dev->pci_dev->device;
750 /* ctrl/tx/rx BAR mappings and remaining init happens in
753 ret = nfp_net_init(eth_dev);
760 rte_eth_dev_probing_finish(eth_dev);
762 } /* End loop, all ports on this PF */
764 goto eth_table_cleanup;
/* Error path: undo every port successfully created above. */
767 for (i = 0; i < pf_dev->total_phyports; i++) {
768 if (pf_dev->ports[i] && pf_dev->ports[i]->eth_dev) {
769 struct rte_eth_dev *tmp_dev;
770 tmp_dev = pf_dev->ports[i]->eth_dev;
771 rte_eth_dev_release_port(tmp_dev);
772 pf_dev->ports[i] = NULL;
/*
 * Primary-process PF bring-up: acquire a CPP handle, read hwinfo and the
 * ethernet port table, upload firmware, read the runtime symbol table,
 * validate the port count, allocate/populate the nfp_pf_dev, map the
 * control and queue BARs, create the per-port ethdevs and register the
 * CPP bridge service. Cleanup labels at the bottom unwind the mappings.
 */
781 static int nfp_pf_init(struct rte_pci_device *pci_dev)
783 struct nfp_pf_dev *pf_dev = NULL;
785 struct nfp_hwinfo *hwinfo;
786 struct nfp_rtsym_table *sym_tbl;
787 struct nfp_eth_table *nfp_eth_table = NULL;
788 char name[RTE_ETH_NAME_MAX_LEN];
797 * When device bound to UIO, the device could be used, by mistake,
798 * by two DPDK apps, and the UIO driver does not avoid it. This
799 * could lead to a serious problem when configuring the NFP CPP
800 * interface. Here we avoid this telling to the CPP init code to
801 * use a lock file if UIO is being used.
803 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
804 cpp = nfp_cpp_from_device_name(pci_dev, 0)
806 cpp = nfp_cpp_from_device_name(pci_dev, 1);
809 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
814 hwinfo = nfp_hwinfo_read(cpp);
816 PMD_INIT_LOG(ERR, "Error reading hwinfo table");
821 nfp_eth_table = nfp_eth_read_ports(cpp);
822 if (!nfp_eth_table) {
823 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
828 if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
829 PMD_INIT_LOG(ERR, "Error when uploading firmware");
831 goto eth_table_cleanup;
834 /* Now the symbol table should be there */
835 sym_tbl = nfp_rtsym_table_read(cpp);
837 PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
840 goto eth_table_cleanup;
843 total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
844 if (total_ports != (int)nfp_eth_table->count) {
845 PMD_DRV_LOG(ERR, "Inconsistent number of ports");
847 goto sym_tbl_cleanup;
850 PMD_INIT_LOG(INFO, "Total physical ports: %d", total_ports);
852 if (total_ports <= 0 || total_ports > 8) {
853 PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
855 goto sym_tbl_cleanup;
857 /* Allocate memory for the PF "device" */
858 snprintf(name, sizeof(name), "nfp_pf%d", 0);
859 pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
862 goto sym_tbl_cleanup;
865 /* Populate the newly created PF device */
867 pf_dev->hwinfo = hwinfo;
868 pf_dev->sym_tbl = sym_tbl;
869 pf_dev->total_phyports = total_ports;
872 pf_dev->multiport = true;
874 pf_dev->pci_dev = pci_dev;
876 /* Map the symbol table */
877 pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
878 pf_dev->total_phyports * 32768,
880 if (!pf_dev->ctrl_bar) {
/* NOTE(review): message says "_pf0_net_ctrl_bar" but the symbol mapped
 * above is "_pf0_net_bar0" — message/symbol mismatch, fix upstream. */
881 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_ctrl_bar");
886 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);
888 /* configure access to tx/rx vNIC BARs */
889 pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
891 NFP_QCP_QUEUE_AREA_SZ,
892 &pf_dev->hwqueues_area);
893 if (!pf_dev->hw_queues) {
/* NOTE(review): the call above is nfp_cpp_map_area(), not nfp_rtsym_map()
 * — the error message is misleading. */
894 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
896 goto ctrl_area_cleanup;
899 PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);
901 /* Initialize and prep physical ports now
902 * This will loop through all physical ports
904 ret = nfp_init_phyports(pf_dev);
906 PMD_INIT_LOG(ERR, "Could not create physical ports");
907 goto hwqueues_cleanup;
910 /* register the CPP bridge service here for primary use */
911 nfp_register_cpp_service(pf_dev->cpp);
916 nfp_cpp_area_free(pf_dev->hwqueues_area);
918 nfp_cpp_area_free(pf_dev->ctrl_area);
932 * When attaching to the NFP4000/6000 PF on a secondary process there
933 * is no need to initialise the PF again. Only minimal work is required
/*
 * Secondary-process attach: obtain an independent CPP handle, read the
 * port count from the firmware symbol table (the primary's nfp_pf_dev is
 * not shared), attach to each port's existing ethdev and install the ops
 * and burst functions, storing the CPP handle in process_private.
 *
 * NOTE(review): in the visible lines total_ports is not range-checked
 * (err from nfp_rtsym_read_le is not tested) — confirm against the full
 * source.
 */
936 static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
939 struct nfp_rtsym_table *sym_tbl;
948 * When device bound to UIO, the device could be used, by mistake,
949 * by two DPDK apps, and the UIO driver does not avoid it. This
950 * could lead to a serious problem when configuring the NFP CPP
951 * interface. Here we avoid this telling to the CPP init code to
952 * use a lock file if UIO is being used.
954 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
955 cpp = nfp_cpp_from_device_name(pci_dev, 0);
957 cpp = nfp_cpp_from_device_name(pci_dev, 1);
960 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
965 * We don't have access to the PF created in the primary process
966 * here so we have to read the number of ports from firmware
968 sym_tbl = nfp_rtsym_table_read(cpp);
970 PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
975 total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
977 for (i = 0; i < total_ports; i++) {
978 struct rte_eth_dev *eth_dev;
979 char port_name[RTE_ETH_NAME_MAX_LEN];
981 snprintf(port_name, sizeof(port_name), "%s_port%d",
982 pci_dev->device.name, i);
984 PMD_DRV_LOG(DEBUG, "Secondary attaching to port %s",
986 eth_dev = rte_eth_dev_attach_secondary(port_name);
989 "secondary process attach failed, "
990 "ethdev doesn't exist");
993 eth_dev->process_private = cpp;
994 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
995 eth_dev->rx_queue_count = nfp_net_rx_queue_count;
996 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
997 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
998 rte_eth_dev_probing_finish(eth_dev);
1001 /* Register the CPP bridge service for the secondary too */
1002 nfp_register_cpp_service(cpp);
/*
 * PCI probe entry point: dispatch to full PF init in the primary
 * process, or the lightweight attach path in secondary processes.
 */
1007 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1008 struct rte_pci_device *dev)
1010 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1011 return nfp_pf_init(dev);
1013 return nfp_pf_secondary_init(dev);
/* PCI IDs claimed by this PMD: NFP4000 and NFP6000 PF NICs. */
1016 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
1018 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
1019 PCI_DEVICE_ID_NFP4000_PF_NIC)
1022 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
1023 PCI_DEVICE_ID_NFP6000_PF_NIC)
/*
 * Device uninit: close every ethdev port that belongs to this PCI
 * device; nfp_net_close() frees the shared PF resources once the last
 * port goes away.
 */
1030 static int nfp_pci_uninit(struct rte_eth_dev *eth_dev)
1032 struct rte_pci_device *pci_dev;
1035 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1037 /* Free up all physical ports under PF */
1038 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
1039 rte_eth_dev_close(port_id);
1041 * Ports can be closed and freed but hotplugging is not
1042 * currently supported
/* PCI remove entry point: delegate to the generic ethdev-PCI teardown. */
1047 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
1049 return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
/* PMD driver descriptor and DPDK registration macros. */
1052 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
1053 .id_table = pci_id_nfp_pf_net_map,
1054 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1055 .probe = nfp_pf_pci_probe,
1056 .remove = eth_nfp_pci_remove,
1059 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
1060 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
1061 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
1064 * c-file-style: "Linux"
1065 * indent-tabs-mode: t