/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 *
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev_vf.c
 *
 * Netronome vNIC VF DPDK Poll-Mode Driver: Main entry point
 */
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"
static void nfp_netvf_read_mac(struct nfp_net_hw *hw);
static int nfp_netvf_start(struct rte_eth_dev *dev);
static int nfp_netvf_stop(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_up(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_down(struct rte_eth_dev *dev);
static int nfp_netvf_close(struct rte_eth_dev *dev);
static int nfp_netvf_init(struct rte_eth_dev *eth_dev);
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev);
static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev);
static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev);
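
/*
 * Read the vNIC MAC address from the control BAR: the firmware exposes
 * it as two big-endian words at NFP_NET_CFG_MACADDR (4 bytes + 2 bytes).
 */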
static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
	memcpy(&hw->mac_addr[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr[4], &tmp, 2);
}
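
/*
 * Bring the port up: enable the configured queues, optionally wire RX
 * queues to interrupt vectors, then push the offload, RSS and ring
 * configuration to the firmware and fill the RX free lists.
 */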
static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	uint32_t intr_vector;
	struct nfp_net_hw *hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);
	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(&pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					"supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);
	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}
	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
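
	/*
	 * The control word is written to the BAR and then committed via
	 * nfp_net_reconfig(), the shared PF/VF helper that signals the
	 * firmware and polls for the update to be acknowledged.
	 */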
	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;
	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	hw->ctrl = new_ctrl;

	return 0;
error:
	/*
	 * An error returned by this function means the application is
	 * exiting, at which point the system releases all allocated
	 * memory, including memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * The app should exit either way, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}
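
/*
 * Stop the port: disable the queues in hardware and reset the software
 * queue state. Unlike close, a stopped port can be started again.
 */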
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;
	PMD_INIT_LOG(DEBUG, "Stop");

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}

	return 0;
}
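
/*
 * A VF has no control over the physical link state, so both link ops
 * simply report the operation as unsupported.
 */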
/* Set the link up. */
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}
/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}
/* Reset and stop device. The device can not be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);
	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}
	rte_intr_disable(&pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(&pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/*
	 * The ixgbe PMD disables the PCIe master on the device here;
	 * the i40e PMD does not...
	 */

	return 0;
}
/* Initialise and register VF driver with DPDK Application */
static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
	.dev_configure = nfp_net_configure,
	.dev_start = nfp_netvf_start,
	.dev_stop = nfp_netvf_stop,
	.dev_set_link_up = nfp_netvf_set_link_up,
	.dev_set_link_down = nfp_netvf_set_link_down,
	.dev_close = nfp_netvf_close,
	.promiscuous_enable = nfp_net_promisc_enable,
	.promiscuous_disable = nfp_net_promisc_disable,
	.link_update = nfp_net_link_update,
	.stats_get = nfp_net_stats_get,
	.stats_reset = nfp_net_stats_reset,
	.dev_infos_get = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set = nfp_net_dev_mtu_set,
	.mac_addr_set = nfp_set_mac_addr,
	.vlan_offload_set = nfp_net_vlan_offload_set,
	.reta_update = nfp_net_reta_update,
	.reta_query = nfp_net_reta_query,
	.rss_hash_update = nfp_net_rss_hash_update,
	.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
	.rx_queue_setup = nfp_net_rx_queue_setup,
	.rx_queue_release = nfp_net_rx_queue_release,
	.tx_queue_setup = nfp_net_tx_queue_setup,
	.tx_queue_release = nfp_net_tx_queue_release,
	.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};
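
/*
 * Per-port initialisation: validate the DMA mask, map the control and
 * queue BARs, read the firmware-advertised capabilities and set up the
 * port MAC address and LSC interrupt.
 */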
static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;

	uint64_t tx_bar_off = 0, rx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;
	int err;

	PMD_INIT_FUNC_TRACE();
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* NFP can not handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD, "device %s can not be used: restricted dma mask to 40 bits!\n",
				pci_dev->device.name);
		return -ENODEV;
	}
	hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
	eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP6000_VF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		err = -ENODEV;
		goto dev_err_ctrl_map;
	}
	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
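
	/*
	 * The queue controller pointers live in BAR2: the offsets computed
	 * above locate this vNIC's first TX and RX queue within that
	 * mapping.
	 */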
	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
			NFD_CFG_MAJOR_VERSION_of(hw->ver),
			NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
			hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
			hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
			hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
			hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
			hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
			hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
			hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
			hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
			hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
			hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
			hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
			hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
			hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
			hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
			hw->max_rx_queues, hw->max_tx_queues);
	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
			RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for MAC address");
		err = -ENOMEM;
		goto dev_err_queues_map;
	}
	nfp_netvf_read_mac(hw);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d",
				port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
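
	/*
	 * Without the LIVE_ADDR capability the firmware cannot change the
	 * MAC address while traffic is flowing, so ask ethdev to apply MAC
	 * updates only while the port is stopped.
	 */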
	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
			"mac=%02x:%02x:%02x:%02x:%02x:%02x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
			hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(&pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)eth_dev);
		/* Telling the firmware about the LSC interrupt entry */
		nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
		/* Recording current stats counters values */
		nfp_net_stats_reset(eth_dev);
	}

	return 0;
dev_err_queues_map:
	nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
	nfp_cpp_area_free(hw->ctrl_area);

	return err;
}
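
/*
 * Only the NFP6000 VF device ID is matched here; physical function
 * ports are handled by the separate PF entry point (nfp_ethdev.c).
 */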
static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
	/* VF cleanup, just free private port data */
	return nfp_netvf_close(eth_dev);
}
static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct nfp_net_adapter), nfp_netvf_init);
}
static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}
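
/*
 * RTE_PCI_DRV_NEED_MAPPING makes the EAL map the PCI BARs before probe;
 * RTE_PCI_DRV_INTR_LSC advertises link-status-change interrupt support.
 */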
static struct rte_pci_driver rte_nfp_net_vf_pmd = {
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_nfp_vf_pci_probe,
	.remove = eth_nfp_vf_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */