dpdk.git: drivers/net/nfp/nfp_ethdev_vf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev_vf.c
 *
 * Netronome vNIC VF DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_alarm.h>

#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"

static void nfp_netvf_read_mac(struct nfp_net_hw *hw);
static int nfp_netvf_start(struct rte_eth_dev *dev);
static int nfp_netvf_stop(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_up(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_down(struct rte_eth_dev *dev);
static int nfp_netvf_close(struct rte_eth_dev *dev);
static int nfp_netvf_init(struct rte_eth_dev *eth_dev);
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev);
static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev);
static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev);

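/*
 * Read the MAC address the firmware exposes in the config BAR. It is
 * laid out as two 32-bit big-endian words at NFP_NET_CFG_MACADDR: the
 * first supplies bytes 0-3 of the address, the second bytes 4-5.
 */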
static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
        uint32_t tmp;

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
        memcpy(&hw->mac_addr[0], &tmp, 4);

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
        memcpy(&hw->mac_addr[4], &tmp, 2);
}

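/*
 * Bring the port up: enable the configured queues, wire up Rx interrupts
 * when the application requested them, push the run-time configuration
 * (MSI-X, RSS, ring state, ENABLE bit) to the firmware and populate the
 * Rx free lists. If anything fails after the device has been enabled,
 * the queues are disabled again before returning.
 */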
static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        uint32_t intr_vector;
        int ret;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_LOG(DEBUG, "Start");

        /* Disabling queues just in case... */
        nfp_net_disable_queues(dev);

        /* Enabling the required queues in the device */
        nfp_net_enable_queues(dev);

        /* Check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
                        /*
                         * Better not to share LSC with RX interrupts,
                         * so unregister the LSC interrupt handler.
                         */
                        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                nfp_net_dev_interrupt_handler, (void *)dev);

                        if (dev->data->nb_rx_queues > 1) {
                                PMD_INIT_LOG(ERR, "PMD rx interrupt only supports 1 queue with UIO");
                                return -EIO;
                        }
                }
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;

                nfp_configure_rx_interrupt(dev, intr_handle);
                update = NFP_NET_CFG_UPDATE_MSIX;
        }

        rte_intr_enable(intr_handle);

        new_ctrl = nfp_check_offloads(dev);

        /* Writing configuration parameters in the device */
        nfp_net_params_setup(hw);

        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;

        if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
                nfp_net_rss_config_default(dev);
                update |= NFP_NET_CFG_UPDATE_RSS;
                new_ctrl |= NFP_NET_CFG_CTRL_RSS;
        }

        /* Enable device */
        new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
                return -EIO;

        /*
         * Allocate rte mbufs for the configured Rx queues.
         * This requires the queues to be enabled first.
         */
        if (nfp_net_rx_freelist_setup(dev) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        hw->ctrl = new_ctrl;

        return 0;

error:
        /*
         * An error here means the application will exit and the system
         * will then release all allocated memory, including memory
         * coming from hugepages.
         *
         * The device may already be enabled at this point, with some
         * queues ready to receive packets: this happens when
         * nfp_net_rx_freelist_setup() succeeds for some queues but
         * fails for a later one.
         *
         * The application should exit, but better to tell the device
         * first.
         */
        nfp_net_disable_queues(dev);

        return ret;
}

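/* Stop the port: disable all queues and reset the Tx/Rx queue state. */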
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
        struct nfp_net_txq *this_tx_q;
        struct nfp_net_rxq *this_rx_q;
        int i;

        PMD_INIT_LOG(DEBUG, "Stop");

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
                nfp_net_reset_tx_queue(this_tx_q);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
                nfp_net_reset_rx_queue(this_rx_q);
        }

        return 0;
}

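/* Set the link up. Link control is not available on a VF. */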
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
        return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
        return -ENOTSUP;
}

/* Reset and stop the device. The device cannot be restarted afterwards. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_txq *this_tx_q;
        struct nfp_net_rxq *this_rx_q;
        int i;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        PMD_INIT_LOG(DEBUG, "Close");

        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        /*
         * We assume that the DPDK application is stopping all the
         * threads/queues before calling the device close function.
         */

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
                nfp_net_reset_tx_queue(this_tx_q);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
                nfp_net_reset_rx_queue(this_rx_q);
        }

        rte_intr_disable(&pci_dev->intr_handle);

        /* Unregister the interrupt callback from the EAL */
        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                     nfp_net_dev_interrupt_handler,
                                     (void *)dev);

        /* Cancel possible impending LSC work here before releasing the port */
        rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
                             (void *)dev);

        /*
         * The ixgbe PMD disables the PCIe master on the device here;
         * the i40e PMD does not.
         */

        return 0;
}

/* eth_dev ops registered with the DPDK application for VF ports */
static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
        .dev_configure          = nfp_net_configure,
        .dev_start              = nfp_netvf_start,
        .dev_stop               = nfp_netvf_stop,
        .dev_set_link_up        = nfp_netvf_set_link_up,
        .dev_set_link_down      = nfp_netvf_set_link_down,
        .dev_close              = nfp_netvf_close,
        .promiscuous_enable     = nfp_net_promisc_enable,
        .promiscuous_disable    = nfp_net_promisc_disable,
        .link_update            = nfp_net_link_update,
        .stats_get              = nfp_net_stats_get,
        .stats_reset            = nfp_net_stats_reset,
        .dev_infos_get          = nfp_net_infos_get,
        .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
        .mtu_set                = nfp_net_dev_mtu_set,
        .mac_addr_set           = nfp_set_mac_addr,
        .vlan_offload_set       = nfp_net_vlan_offload_set,
        .reta_update            = nfp_net_reta_update,
        .reta_query             = nfp_net_reta_query,
        .rss_hash_update        = nfp_net_rss_hash_update,
        .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
        .rx_queue_setup         = nfp_net_rx_queue_setup,
        .rx_queue_release       = nfp_net_rx_queue_release,
        .tx_queue_setup         = nfp_net_tx_queue_setup,
        .tx_queue_release       = nfp_net_tx_queue_release,
        .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
        .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
};

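/*
 * Per-port initialisation: locate the control and queue BARs, read the
 * read-only capability fields from the config BAR, set up the MAC
 * address (falling back to a random one if the firmware does not
 * provide a valid address) and register the LSC interrupt handler.
 */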
static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_hw *hw;
        struct rte_ether_addr *tmp_ether_addr;

        uint64_t tx_bar_off = 0, rx_bar_off = 0;
        uint32_t start_q;
        int stride = 4;
        int port = 0;
        int err;

        PMD_INIT_FUNC_TRACE();

        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* The NFP cannot handle DMA addresses requiring more than 40 bits */
        if (rte_mem_check_dma_mask(40)) {
                RTE_LOG(ERR, PMD,
                        "device %s cannot be used: restricted dma mask to 40 bits!\n",
                        pci_dev->device.name);
                return -ENODEV;
        }

        hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
        eth_dev->rx_queue_count = nfp_net_rx_queue_count;
        eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
        eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

        PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
                     pci_dev->id.vendor_id, pci_dev->id.device_id,
                     pci_dev->addr.domain, pci_dev->addr.bus,
                     pci_dev->addr.devid, pci_dev->addr.function);

        hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
        if (hw->ctrl_bar == NULL) {
                PMD_DRV_LOG(ERR,
                        "hw->ctrl_bar is NULL. BAR0 not configured");
                return -ENODEV;
        }

        PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

        hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
        hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

        /* Work out where in the BAR the queues start. */
        switch (pci_dev->id.device_id) {
        case PCI_DEVICE_ID_NFP6000_VF_NIC:
                start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
                tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
                start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
                rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
                break;
        default:
                PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
                err = -ENODEV;
                goto dev_err_ctrl_map;
        }

        PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
        PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

        hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
                     tx_bar_off;
        hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
                     rx_bar_off;

        PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
                     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

        nfp_net_cfg_queue_setup(hw);

        /* Get some of the read-only fields from the config BAR */
        hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
        hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
        hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
        hw->mtu = RTE_ETHER_MTU;

        /* VLAN insertion is incompatible with LSOv2 */
        if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
                hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

        if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
                hw->rx_offset = NFP_NET_RX_OFFSET;
        else
                hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

        PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
                           NFD_CFG_MAJOR_VERSION_of(hw->ver),
                           NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

        PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
                     hw->cap & NFP_NET_CFG_CTRL_PROMISC   ? "PROMISC "   : "",
                     hw->cap & NFP_NET_CFG_CTRL_L2BC      ? "L2BCFILT "  : "",
                     hw->cap & NFP_NET_CFG_CTRL_L2MC      ? "L2MCFILT "  : "",
                     hw->cap & NFP_NET_CFG_CTRL_RXCSUM    ? "RXCSUM "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_TXCSUM    ? "TXCSUM "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_RXVLAN    ? "RXVLAN "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_TXVLAN    ? "TXVLAN "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_SCATTER   ? "SCATTER "   : "",
                     hw->cap & NFP_NET_CFG_CTRL_GATHER    ? "GATHER "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
                     hw->cap & NFP_NET_CFG_CTRL_LSO       ? "TSO "       : "",
                     hw->cap & NFP_NET_CFG_CTRL_LSO2      ? "TSOv2 "     : "",
                     hw->cap & NFP_NET_CFG_CTRL_RSS       ? "RSS "       : "",
                     hw->cap & NFP_NET_CFG_CTRL_RSS2      ? "RSSv2 "     : "");

        hw->ctrl = 0;

        hw->stride_rx = stride;
        hw->stride_tx = stride;

        PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
                     hw->max_rx_queues, hw->max_tx_queues);

        /* Initializing spinlock for reconfigs */
        rte_spinlock_init(&hw->reconfig_lock);

        /* Allocating memory for mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
                                               RTE_ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory for MAC address");
                err = -ENOMEM;
                goto dev_err_queues_map;
        }

        nfp_netvf_read_mac(hw);

        tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
        if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
                PMD_INIT_LOG(INFO, "Using random mac address for port %d",
                                   port);
                /* Using random mac addresses for VFs */
                rte_eth_random_addr(&hw->mac_addr[0]);
                nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
        }

        /* Copying mac address to DPDK eth_dev struct */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
                        &eth_dev->data->mac_addrs[0]);

        if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
                eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
                     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id,
                     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
                     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                /* Registering LSC interrupt handler */
                rte_intr_callback_register(&pci_dev->intr_handle,
                                           nfp_net_dev_interrupt_handler,
                                           (void *)eth_dev);
                /* Telling the firmware about the LSC interrupt entry */
                nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
                /* Recording current stats counters values */
                nfp_net_stats_reset(eth_dev);
        }

        return 0;

dev_err_queues_map:
        nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
        nfp_cpp_area_free(hw->ctrl_area);

        return err;
}

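/* PCI device IDs handled by this PMD: the NFP6000 VF function only. */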
static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
                               PCI_DEVICE_ID_NFP6000_VF_NIC)
        },
        {
                .vendor_id = 0,
        },
};

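/* Called when the PCI device is removed; cleanup delegates to close. */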
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
        /* VF cleanup: all port resources are released by the close path */
        return nfp_netvf_close(eth_dev);
}

static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct nfp_net_adapter), nfp_netvf_init);
}

static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}

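/*
 * PCI driver registration: the PMD needs its BARs mapped by the PCI bus
 * and supports link-state-change interrupts.
 */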
static struct rte_pci_driver rte_nfp_net_vf_pmd = {
        .id_table = pci_id_nfp_vf_net_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_nfp_vf_pci_probe,
        .remove = eth_nfp_vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */