/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev_vf.c
 *
 * Netronome vNIC VF DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_alarm.h>

#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"
static void nfp_netvf_read_mac(struct nfp_net_hw *hw);
static int nfp_netvf_start(struct rte_eth_dev *dev);
static int nfp_netvf_stop(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_up(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_down(struct rte_eth_dev *dev);
static int nfp_netvf_close(struct rte_eth_dev *dev);
static int nfp_netvf_init(struct rte_eth_dev *eth_dev);
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev);
static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev);
static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev);
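
/*
 * Read the vNIC MAC address from the config BAR. Descriptive note: the
 * firmware packs the 6-byte address big-endian into two 32-bit registers
 * starting at NFP_NET_CFG_MACADDR, so each word is byte-swapped to CPU
 * order before its bytes are copied into hw->mac_addr.
 */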
static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
        uint32_t tmp;

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
        memcpy(&hw->mac_addr[0], &tmp, 4);

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
        memcpy(&hw->mac_addr[4], &tmp, 2);
}
static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        uint32_t intr_vector;
        int ret;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_LOG(DEBUG, "Start");

        /* Disabling queues just in case... */
        nfp_net_disable_queues(dev);

        /* Enabling the required queues in the device */
        nfp_net_enable_queues(dev);
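
        /*
         * Descriptive note on the block below: UIO exposes only a single
         * interrupt vector, so when the application asks for RX interrupts
         * under UIO the LSC handler has to be given up and at most one RX
         * queue can be served; with VFIO/MSI-X each queue gets its own
         * vector via the efd mapping.
         */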
        /* Check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
                        /*
                         * Better not to share LSC with RX interrupts.
                         * Unregistering the LSC interrupt handler.
                         */
                        rte_intr_callback_unregister(pci_dev->intr_handle,
                                nfp_net_dev_interrupt_handler, (void *)dev);

                        if (dev->data->nb_rx_queues > 1) {
                                PMD_INIT_LOG(ERR, "PMD rx interrupt only "
                                        "supports 1 queue with UIO");
                                return -EIO;
                        }
                }
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;

                nfp_configure_rx_interrupt(dev, intr_handle);
                update = NFP_NET_CFG_UPDATE_MSIX;
        }

        rte_intr_enable(intr_handle);

        new_ctrl = nfp_check_offloads(dev);

        /* Writing configuration parameters in the device */
        nfp_net_params_setup(hw);

        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;

        if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
                nfp_net_rss_config_default(dev);
                update |= NFP_NET_CFG_UPDATE_RSS;
                new_ctrl |= NFP_NET_CFG_CTRL_RSS;
        }

        /* Enable device */
        new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
                return -EIO;
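
        /*
         * Descriptive note on the handshake above (based on the common
         * nfp_net_reconfig() helper, not code in this file): the new
         * control word is written to the config BAR first, then the helper
         * posts the update mask and polls until the firmware acknowledges
         * it, so a negative return means the firmware rejected or timed
         * out on the requested configuration.
         */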

        /*
         * Allocating rte mbufs for configured rx queues.
         * This requires queues being enabled before.
         */
        if (nfp_net_rx_freelist_setup(dev) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        hw->ctrl = new_ctrl;

        return 0;

error:
        /*
         * An error returned by this function should mean the app exits and
         * the system then releases all the memory it allocated, even the
         * memory coming from hugepages.
         *
         * The device could be enabled at this point with some queues
         * ready for getting packets. This is true if the call to
         * nfp_net_rx_freelist_setup() succeeds for some queues but
         * fails for subsequent queues.
         *
         * This should make the app exit, but it is better if we tell the
         * device first.
         */
        nfp_net_disable_queues(dev);

        return ret;
}
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
        struct nfp_net_txq *this_tx_q;
        struct nfp_net_rxq *this_rx_q;
        int i;

        PMD_INIT_LOG(DEBUG, "Stop");

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
                nfp_net_reset_tx_queue(this_tx_q);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
                nfp_net_reset_rx_queue(this_rx_q);
        }

        return 0;
}
/* Set the link up. */
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
        /* A VF cannot control the physical link */
        return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
        /* A VF cannot control the physical link */
        return -ENOTSUP;
}
/* Reset and stop the device. The device can not be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_txq *this_tx_q;
        struct nfp_net_rxq *this_rx_q;
        int i;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        PMD_INIT_LOG(DEBUG, "Close");

        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        /*
         * We assume that the DPDK application is stopping all the
         * threads/queues before calling the device close function.
         */

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
                nfp_net_reset_tx_queue(this_tx_q);
                nfp_net_tx_queue_release(dev, i);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
                nfp_net_reset_rx_queue(this_rx_q);
                nfp_net_rx_queue_release(dev, i);
        }

        rte_intr_disable(pci_dev->intr_handle);

        /* Unregister the callback function from the EAL library */
        rte_intr_callback_unregister(pci_dev->intr_handle,
                        nfp_net_dev_interrupt_handler,
                        (void *)dev);

        /* Cancel possible impending LSC work here before releasing the port */
        rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
                        (void *)dev);

        /*
         * The ixgbe PMD disables the PCIe master on the device here;
         * the i40e PMD does not.
         */

        return 0;
}
/* Initialise and register VF driver with DPDK Application */
static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
        .dev_configure          = nfp_net_configure,
        .dev_start              = nfp_netvf_start,
        .dev_stop               = nfp_netvf_stop,
        .dev_set_link_up        = nfp_netvf_set_link_up,
        .dev_set_link_down      = nfp_netvf_set_link_down,
        .dev_close              = nfp_netvf_close,
        .promiscuous_enable     = nfp_net_promisc_enable,
        .promiscuous_disable    = nfp_net_promisc_disable,
        .link_update            = nfp_net_link_update,
        .stats_get              = nfp_net_stats_get,
        .stats_reset            = nfp_net_stats_reset,
        .dev_infos_get          = nfp_net_infos_get,
        .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
        .mtu_set                = nfp_net_dev_mtu_set,
        .mac_addr_set           = nfp_set_mac_addr,
        .vlan_offload_set       = nfp_net_vlan_offload_set,
        .reta_update            = nfp_net_reta_update,
        .reta_query             = nfp_net_reta_query,
        .rss_hash_update        = nfp_net_rss_hash_update,
        .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
        .rx_queue_setup         = nfp_net_rx_queue_setup,
        .rx_queue_release       = nfp_net_rx_queue_release,
        .tx_queue_setup         = nfp_net_tx_queue_setup,
        .tx_queue_release       = nfp_net_tx_queue_release,
        .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
        .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
};
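
/*
 * The ops above are dispatched through the generic ethdev API; the PMD is
 * never called directly by an application. A minimal sketch of the calling
 * sequence that exercises them (port_id and mbuf_pool are placeholders,
 * assuming one RX and one TX queue; this is not part of the driver):
 *
 *      struct rte_eth_conf conf = { 0 };
 *
 *      rte_eth_dev_configure(port_id, 1, 1, &conf);    // .dev_configure
 *      rte_eth_rx_queue_setup(port_id, 0, 512,
 *                      rte_eth_dev_socket_id(port_id),
 *                      NULL, mbuf_pool);               // .rx_queue_setup
 *      rte_eth_tx_queue_setup(port_id, 0, 512,
 *                      rte_eth_dev_socket_id(port_id),
 *                      NULL);                          // .tx_queue_setup
 *      rte_eth_dev_start(port_id);                     // .dev_start
 */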
static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_hw *hw;
        struct rte_ether_addr *tmp_ether_addr;

        uint64_t tx_bar_off = 0, rx_bar_off = 0;
        uint32_t start_q;
        int stride = 4;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* NFP can not handle DMA addresses requiring more than 40 bits */
        if (rte_mem_check_dma_mask(40)) {
                RTE_LOG(ERR, PMD,
                        "device %s can not be used: restricted dma mask to 40 bits!\n",
                        pci_dev->device.name);
                return -ENODEV;
        }
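
        /*
         * Descriptive note on the check above: rte_mem_check_dma_mask()
         * returns non-zero when some mapped memory lies above the 40-bit
         * IOVA limit. The NFP DMA engines could not reach such memory, so
         * the port is refused rather than risking silent DMA failures.
         */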
        hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
        eth_dev->rx_queue_count = nfp_net_rx_queue_count;
        eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
        eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
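
        /*
         * Descriptive note: everything from here on touches hardware or
         * shared per-port state, so it runs in the primary process only.
         * The ops and burst-function pointers above are per-process
         * addresses and therefore had to be assigned before this early
         * return.
         */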
        rte_eth_copy_pci_info(eth_dev, pci_dev);

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

        PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
                pci_dev->id.vendor_id, pci_dev->id.device_id,
                pci_dev->addr.domain, pci_dev->addr.bus,
                pci_dev->addr.devid, pci_dev->addr.function);

        hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
        if (hw->ctrl_bar == NULL) {
                PMD_DRV_LOG(ERR,
                        "hw->ctrl_bar is NULL. BAR0 not configured");
                return -ENODEV;
        }

        PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

        hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
        hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

        /* Work out where in the BAR the queues start. */
        switch (pci_dev->id.device_id) {
        case PCI_DEVICE_ID_NFP6000_VF_NIC:
                start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
                tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
                start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
                rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
                break;
        default:
                PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
                err = -ENODEV;
                goto dev_err_ctrl_map;
        }

        PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
        PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

        hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
        hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

        PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
                hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

        nfp_net_cfg_queue_setup(hw);

        /* Get some of the read-only fields from the config BAR */
        hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
        hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
        hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
        hw->mtu = RTE_ETHER_MTU;

        /* VLAN insertion is incompatible with LSOv2 */
        if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
                hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
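
        /*
         * Choose the RX packet offset. Descriptive note: from NFD major
         * version 2 onwards the firmware reports a configurable prepend
         * offset at NFP_NET_CFG_RX_OFFSET_ADDR; older firmware always uses
         * the fixed NFP_NET_RX_OFFSET.
         */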
        if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
                hw->rx_offset = NFP_NET_RX_OFFSET;
        else
                hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

        PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
                NFD_CFG_MAJOR_VERSION_of(hw->ver),
                NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

        PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
                hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
                hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
                hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
                hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
                hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
                hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
                hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
                hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
                hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
                hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
                hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
                hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
                hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
                hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");

        hw->ctrl = 0;

        hw->stride_rx = stride;
        hw->stride_tx = stride;

        PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
                hw->max_rx_queues, hw->max_tx_queues);

        /* Initializing spinlock for reconfigs */
        rte_spinlock_init(&hw->reconfig_lock);
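
        /*
         * Descriptive note: the reconfig lock serializes the firmware
         * reconfiguration handshake performed in nfp_net_reconfig(), so
         * concurrent control-path callers cannot interleave their update
         * requests to the firmware.
         */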

        /* Allocating memory for mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
                        RTE_ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
                err = -ENOMEM;
                goto dev_err_queues_map;
        }

        nfp_netvf_read_mac(hw);

        tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
        if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
                PMD_INIT_LOG(INFO, "Using random mac address for port %d",
                        eth_dev->data->port_id);
                /* Using random mac addresses for VFs */
                rte_eth_random_addr(&hw->mac_addr[0]);
                nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
        }
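
        /*
         * Descriptive note: a VF frequently comes up without a MAC assigned
         * by the PF or the hypervisor, in which case the address read back
         * above is all-zero and thus invalid; the random, locally
         * administered address generated here keeps the port usable.
         */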

        /* Copying mac address to DPDK eth_dev struct */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
                        &eth_dev->data->mac_addrs[0]);

        if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
                eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
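
        /*
         * Descriptive note: RTE_ETH_DEV_NOLIVE_MAC_ADDR tells the ethdev
         * layer that the MAC cannot be changed while the port is running,
         * so a user-supplied address is applied before the port starts when
         * the firmware lacks the LIVE_ADDR capability.
         */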

        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
                "mac=%02x:%02x:%02x:%02x:%02x:%02x",
                eth_dev->data->port_id, pci_dev->id.vendor_id,
                pci_dev->id.device_id,
                hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
                hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                /* Registering LSC interrupt handler */
                rte_intr_callback_register(pci_dev->intr_handle,
                                nfp_net_dev_interrupt_handler, (void *)eth_dev);
                /* Telling the firmware about the LSC interrupt entry */
                nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
                /* Recording current stats counters values */
                nfp_net_stats_reset(eth_dev);
        }

        return 0;

dev_err_queues_map:
        nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
        nfp_cpp_area_free(hw->ctrl_area);

        return err;
}
static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
                        PCI_DEVICE_ID_NFP6000_VF_NIC)
        },
        {
                .vendor_id = 0, /* sentinel */
        },
};
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
        /* VF cleanup, just free private port data */
        return nfp_netvf_close(eth_dev);
}
static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                        sizeof(struct nfp_net_adapter), nfp_netvf_init);
}
static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}
static struct rte_pci_driver rte_nfp_net_vf_pmd = {
        .id_table = pci_id_nfp_vf_net_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_nfp_vf_pci_probe,
        .remove = eth_nfp_vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */