/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 *
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev_vf.c
 *
 * Netronome vNIC VF DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_alarm.h>

#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"
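
/*
 * Read the 6-byte MAC address from the vNIC config BAR: 4 bytes from
 * the word at NFP_NET_CFG_MACADDR and 2 from the following word.
 */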
static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
	memcpy(&hw->mac_addr[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr[4], &tmp, 2);
}
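
/*
 * dev_start callback: enable queues and interrupts, apply the offload
 * and RSS configuration, then set the ENABLE bit through the firmware
 * reconfig mechanism.
 */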
static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering the LSC interrupt handler.
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					     "supports 1 queue with UIO");
				return -EIO;
			}
		}

		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);
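
	/* Map the offloads requested in dev_conf onto NFP_NET_CFG_CTRL_* bits */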
	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
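
	/*
	 * Write the new control word, then ask the firmware to apply it:
	 * nfp_net_reconfig() posts the update mask and polls until the
	 * firmware acknowledges it or reports an error.
	 */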
	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function means the application exits
	 * and the system then releases all the memory allocated, even
	 * memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * The app should exit anyway, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}
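
/* Stop device: disable rx and tx functions to allow for reconfiguring. */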
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;

	PMD_INIT_LOG(DEBUG, "Stop");

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}

	return 0;
}

/* Set the link up. */
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Reset and stop device. The device cannot be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application has stopped all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
		nfp_net_tx_queue_release(dev, i);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
		nfp_net_rx_queue_release(dev, i);
	}
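
	/* Quiesce the LSC interrupt path before the port is released */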
	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
				     nfp_net_dev_interrupt_handler,
				     (void *)dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			     (void *)dev);

	/*
	 * The ixgbe PMD disables the PCIe master on the
	 * device. The i40e does not...
	 */

	return 0;
}

/* Initialise and register VF driver with DPDK Application */
static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
	.dev_configure = nfp_net_configure,
	.dev_start = nfp_netvf_start,
	.dev_stop = nfp_netvf_stop,
	.dev_set_link_up = nfp_netvf_set_link_up,
	.dev_set_link_down = nfp_netvf_set_link_down,
	.dev_close = nfp_netvf_close,
	.promiscuous_enable = nfp_net_promisc_enable,
	.promiscuous_disable = nfp_net_promisc_disable,
	.link_update = nfp_net_link_update,
	.stats_get = nfp_net_stats_get,
	.stats_reset = nfp_net_stats_reset,
	.dev_infos_get = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set = nfp_net_dev_mtu_set,
	.mac_addr_set = nfp_set_mac_addr,
	.vlan_offload_set = nfp_net_vlan_offload_set,
	.reta_update = nfp_net_reta_update,
	.reta_query = nfp_net_reta_query,
	.rss_hash_update = nfp_net_rss_hash_update,
	.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
	.rx_queue_setup = nfp_net_rx_queue_setup,
	.rx_queue_release = nfp_net_rx_queue_release,
	.tx_queue_setup = nfp_net_tx_queue_setup,
	.tx_queue_release = nfp_net_tx_queue_release,
	.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};
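
/*
 * Per-port initialisation: map the config BAR, discover the queue
 * layout and device capabilities, then set up the MAC address and
 * the LSC interrupt handler.
 */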
static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;

	uint64_t tx_bar_off = 0, rx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;
	int err;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* The NFP cannot handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD,
			"device %s can not be used: restricted dma mask to 40 bits!\n",
			pci_dev->device.name);
		return -ENODEV;
	}

	hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
	eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP6000_VF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		err = -ENODEV;
		goto dev_err_ctrl_map;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
		     NFD_CFG_MAJOR_VERSION_of(hw->ver),
		     NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
		     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
		     hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
		     hw->max_rx_queues, hw->max_tx_queues);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		err = -ENOMEM;
		goto dev_err_queues_map;
	}
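
	/* Use the factory-programmed MAC, or a random one if it is invalid */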
	nfp_netvf_read_mac(hw);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d",
			     port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			    &eth_dev->data->mac_addrs[0]);
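
	/*
	 * Without firmware support for live MAC changes, ask ethdev to
	 * (re)apply the MAC address at port start instead.
	 */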
	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
		     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(pci_dev->intr_handle,
					   nfp_net_dev_interrupt_handler,
					   (void *)eth_dev);
		/* Telling the firmware about the LSC interrupt entry */
		nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
		/* Recording current stats counters values */
		nfp_net_stats_reset(eth_dev);
	}

	return 0;

dev_err_queues_map:
	nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
	nfp_cpp_area_free(hw->ctrl_area);

	return err;
}

static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
	/* VF cleanup, just free private port data */
	return nfp_netvf_close(eth_dev);
}

static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct nfp_net_adapter), nfp_netvf_init);
}

static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_vf_pmd = {
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_nfp_vf_pci_probe,
	.remove = eth_nfp_vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */