/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev_vf.c
 *
 * Netronome vNIC VF DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_alarm.h>

#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"
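
/*
 * Read the vNIC MAC address from the control BAR. The firmware stores it
 * big-endian across two 32-bit words at NFP_NET_CFG_MACADDR, so the first
 * read yields bytes 0-3 and the second read bytes 4-5.
 */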
static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
	memcpy(&hw->mac_addr[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr[4], &tmp, 2);
}
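
/*
 * Bring the port up: enable the configured queues, optionally wire up RX
 * queue interrupts, push the offload/RSS configuration to the firmware and
 * populate the RX free lists with mbufs.
 */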
static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (rte_intr_type_get(intr_handle) ==
				RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					"supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
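
	/*
	 * Write the new control word, then signal the firmware through the
	 * reconfig mechanism with the accumulated UPDATE flags; the call
	 * waits until the firmware acknowledges the update.
	 */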
	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app
	 * exiting and then the system releasing all the memory
	 * allocated even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit, but it is better if we tell
	 * the device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;

	PMD_INIT_LOG(DEBUG, "Stop");

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}

	return 0;
}
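
/*
 * Set the link up. VFs cannot drive the physical port state (the PF owns
 * it), so both link-state hooks simply report -ENOTSUP.
 */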
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}
/* Reset and stop device. The device can not be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
		nfp_net_tx_queue_release(dev, i);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
		nfp_net_rx_queue_release(dev, i);
	}

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			(void *)dev);

	/*
	 * The ixgbe PMD disables the pcie master on the
	 * device. The i40e does not...
	 */

	return 0;
}
/* Initialise and register VF driver with DPDK Application */
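/*
 * Two ops tables are kept because the NFD3 and NFDk datapaths use different
 * Tx descriptor formats and therefore different Tx queue setup routines;
 * every other callback is shared between the two tables.
 */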
static const struct eth_dev_ops nfp_netvf_nfd3_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_netvf_start,
	.dev_stop		= nfp_netvf_stop,
	.dev_set_link_up	= nfp_netvf_set_link_up,
	.dev_set_link_down	= nfp_netvf_set_link_down,
	.dev_close		= nfp_netvf_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_net_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.tx_queue_setup		= nfp_net_nfd3_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable	= nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable	= nfp_rx_queue_intr_disable,
};
static const struct eth_dev_ops nfp_netvf_nfdk_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_netvf_start,
	.dev_stop		= nfp_netvf_stop,
	.dev_set_link_up	= nfp_netvf_set_link_up,
	.dev_set_link_down	= nfp_netvf_set_link_down,
	.dev_close		= nfp_netvf_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_net_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.tx_queue_setup		= nfp_net_nfdk_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable	= nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable	= nfp_rx_queue_intr_disable,
};
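
/*
 * Select the datapath implementation advertised by the firmware: the class
 * field of the version word distinguishes the NFD3 and NFDk descriptor
 * formats, and the matching ops table and Tx burst function are mounted.
 */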
static inline int
nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
{
	switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
	case NFP_NET_CFG_VERSION_DP_NFD3:
		eth_dev->dev_ops = &nfp_netvf_nfd3_eth_dev_ops;
		eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
		break;
	case NFP_NET_CFG_VERSION_DP_NFDK:
		if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
			PMD_DRV_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
				NFD_CFG_MAJOR_VERSION_of(hw->ver));
			return -EINVAL;
		}
		eth_dev->dev_ops = &nfp_netvf_nfdk_eth_dev_ops;
		eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts;
		break;
	default:
		PMD_DRV_LOG(ERR, "The firmware version is not supported");
		return -EINVAL;
	}

	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;

	return 0;
}
static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;

	uint64_t tx_bar_off = 0, rx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;
	int err;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* NFP can not handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD,
			"device %s can not be used: restricted dma mask to 40 bits!\n",
			pci_dev->device.name);
		return -ENODEV;
	}
	hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);

	if (nfp_netvf_ethdev_ops_mount(hw, eth_dev))
		return -EINVAL;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_VF_NIC:
	case PCI_DEVICE_ID_NFP6000_VF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = nfp_pci_queue(pci_dev, start_q);
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = nfp_pci_queue(pci_dev, start_q);
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		err = -ENODEV;
		goto dev_err_ctrl_map;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
			tx_bar_off;
	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
			rx_bar_off;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
			NFD_CFG_MAJOR_VERSION_of(hw->ver),
			NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
			hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
			hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
			hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
			hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
			hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
			hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
			hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
			hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
			hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
			hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
			hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
			hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
			hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
			hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");
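
	/*
	 * Start with an all-clear control word, and record the fixed queue
	 * stride: the queue controller pointer for logical queue N sits at
	 * hardware queue index N * stride (4) within the vNIC queue BAR.
	 */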
	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;
	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
			hw->max_rx_queues, hw->max_tx_queues);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
			RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		err = -ENOMEM;
		goto dev_err_queues_map;
	}

	nfp_netvf_read_mac(hw);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d",
				port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
			"mac=%02x:%02x:%02x:%02x:%02x:%02x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
			hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)eth_dev);
		/* Telling the firmware about the LSC interrupt entry */
		nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
		/* Recording current stats counters values */
		nfp_net_stats_reset(eth_dev);
	}

	return 0;

dev_err_queues_map:
	nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
	nfp_cpp_area_free(hw->ctrl_area);

	return err;
}
static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP3800_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};
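
/*
 * PCI glue: probe allocates an ethdev with room for the adapter private
 * data and runs nfp_netvf_init(); remove tears the port back down through
 * nfp_vf_pci_uninit(), which just closes the port.
 */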
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
	/* VF cleanup, just free private port data */
	return nfp_netvf_close(eth_dev);
}

static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct nfp_net_adapter), nfp_netvf_init);
}

static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}
static struct rte_pci_driver rte_nfp_net_vf_pmd = {
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_nfp_vf_pci_probe,
	.remove = eth_nfp_vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */