/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */
7 #include <rte_common.h>
10 #include <rte_vhost.h>
14 #include "sfc_vdpa_ops.h"
16 extern uint32_t sfc_logtype_driver;
19 #define PAGE_SIZE (sysconf(_SC_PAGESIZE))
23 sfc_vdpa_dma_alloc(struct sfc_vdpa_adapter *sva, const char *name,
24 size_t len, efsys_mem_t *esmp)
27 size_t mcdi_buff_size;
28 char mz_name[RTE_MEMZONE_NAMESIZE];
29 const struct rte_memzone *mz = NULL;
30 int numa_node = sva->pdev->device.numa_node;
33 mcdi_buff_size = RTE_ALIGN_CEIL(len, PAGE_SIZE);
34 ret = snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%s",
35 sva->pdev->name, name);
36 if (ret < 0 || ret >= RTE_MEMZONE_NAMESIZE) {
37 sfc_vdpa_err(sva, "%s_%s too long to fit in mz_name",
38 sva->pdev->name, name);
42 sfc_vdpa_log_init(sva, "name=%s, len=%zu", mz_name, len);
44 mz = rte_memzone_reserve_aligned(mz_name, mcdi_buff_size,
46 RTE_MEMZONE_IOVA_CONTIG,
49 sfc_vdpa_err(sva, "cannot reserve memory for %s: len=%#x: %s",
50 mz_name, (unsigned int)len,
51 rte_strerror(rte_errno));
55 /* IOVA address for MCDI would be re-calculated if mapping
56 * using default IOVA would fail.
57 * TODO: Earlier there was no way to get valid IOVA range.
58 * Recently a patch has been submitted to get the IOVA range
59 * using ioctl. VFIO_IOMMU_GET_INFO. This patch is available
60 * in the kernel version >= 5.4. Support to get the default
61 * IOVA address for MCDI buffer using available IOVA range
62 * would be added later. Meanwhile default IOVA for MCDI buffer
63 * is kept at high mem at 2TB. In case of overlap new available
64 * addresses would be searched and same would be used.
66 mcdi_iova = SFC_VDPA_DEFAULT_MCDI_IOVA;
69 ret = rte_vfio_container_dma_map(sva->vfio_container_fd,
70 (uint64_t)mz->addr, mcdi_iova,
75 mcdi_iova = mcdi_iova >> 1;
76 if (mcdi_iova < mcdi_buff_size) {
78 "DMA mapping failed for MCDI : %s",
79 rte_strerror(rte_errno));
85 esmp->esm_addr = mcdi_iova;
86 esmp->esm_base = mz->addr;
87 sva->mcdi_buff_size = mcdi_buff_size;
90 "DMA name=%s len=%zu => virt=%p iova=0x%" PRIx64,
91 name, len, esmp->esm_base, esmp->esm_addr);
97 sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp)
101 sfc_vdpa_log_init(sva, "name=%s", esmp->esm_mz->name);
103 ret = rte_vfio_container_dma_unmap(sva->vfio_container_fd,
104 (uint64_t)esmp->esm_base,
105 esmp->esm_addr, sva->mcdi_buff_size);
107 sfc_vdpa_err(sva, "DMA unmap failed for MCDI : %s",
108 rte_strerror(rte_errno));
111 "DMA free name=%s => virt=%p iova=0x%" PRIx64,
112 esmp->esm_mz->name, esmp->esm_base, esmp->esm_addr);
114 rte_free((void *)(esmp->esm_base));
116 sva->mcdi_buff_size = 0;
117 memset(esmp, 0, sizeof(*esmp));
121 sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *ops_data, bool do_map)
125 struct rte_vhost_memory *vhost_mem = NULL;
126 struct rte_vhost_mem_region *mem_reg = NULL;
127 int vfio_container_fd;
130 dev = ops_data->dev_handle;
132 sfc_vdpa_adapter_by_dev_handle(dev)->vfio_container_fd;
134 rc = rte_vhost_get_mem_table(ops_data->vid, &vhost_mem);
137 "failed to get VM memory layout");
141 for (i = 0; i < vhost_mem->nregions; i++) {
142 mem_reg = &vhost_mem->regions[i];
145 rc = rte_vfio_container_dma_map(vfio_container_fd,
146 mem_reg->host_user_addr,
147 mem_reg->guest_phys_addr,
151 "DMA map failed : %s",
152 rte_strerror(rte_errno));
153 goto failed_vfio_dma_map;
156 rc = rte_vfio_container_dma_unmap(vfio_container_fd,
157 mem_reg->host_user_addr,
158 mem_reg->guest_phys_addr,
162 "DMA unmap failed : %s",
163 rte_strerror(rte_errno));
174 for (j = 0; j < i; j++) {
175 mem_reg = &vhost_mem->regions[j];
176 rte_vfio_container_dma_unmap(vfio_container_fd,
177 mem_reg->host_user_addr,
178 mem_reg->guest_phys_addr,
189 sfc_vdpa_mem_bar_init(struct sfc_vdpa_adapter *sva,
190 const efx_bar_region_t *mem_ebrp)
192 struct rte_pci_device *pci_dev = sva->pdev;
193 efsys_bar_t *ebp = &sva->mem_bar;
194 struct rte_mem_resource *res =
195 &pci_dev->mem_resource[mem_ebrp->ebr_index];
197 SFC_BAR_LOCK_INIT(ebp, pci_dev->name);
198 ebp->esb_rid = mem_ebrp->ebr_index;
199 ebp->esb_dev = pci_dev;
200 ebp->esb_base = res->addr;
206 sfc_vdpa_mem_bar_fini(struct sfc_vdpa_adapter *sva)
208 efsys_bar_t *ebp = &sva->mem_bar;
210 SFC_BAR_LOCK_DESTROY(ebp);
211 memset(ebp, 0, sizeof(*ebp));
215 sfc_vdpa_nic_probe(struct sfc_vdpa_adapter *sva)
217 efx_nic_t *enp = sva->nic;
220 rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
222 sfc_vdpa_err(sva, "nic probe failed: %s", rte_strerror(rc));
228 sfc_vdpa_estimate_resource_limits(struct sfc_vdpa_adapter *sva)
230 efx_drv_limits_t limits;
232 uint32_t evq_allocated;
233 uint32_t rxq_allocated;
234 uint32_t txq_allocated;
235 uint32_t max_queue_cnt;
237 memset(&limits, 0, sizeof(limits));
239 /* Request at least one Rx and Tx queue */
240 limits.edl_min_rxq_count = 1;
241 limits.edl_min_txq_count = 1;
242 /* Management event queue plus event queue for Tx/Rx queue */
243 limits.edl_min_evq_count =
244 1 + RTE_MAX(limits.edl_min_rxq_count, limits.edl_min_txq_count);
246 limits.edl_max_rxq_count = SFC_VDPA_MAX_QUEUE_PAIRS;
247 limits.edl_max_txq_count = SFC_VDPA_MAX_QUEUE_PAIRS;
248 limits.edl_max_evq_count = 1 + SFC_VDPA_MAX_QUEUE_PAIRS;
250 SFC_VDPA_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
251 SFC_VDPA_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
252 SFC_VDPA_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
254 /* Configure the minimum required resources needed for the
255 * driver to operate, and the maximum desired resources that the
256 * driver is capable of using.
258 sfc_vdpa_log_init(sva, "set drv limit");
259 efx_nic_set_drv_limits(sva->nic, &limits);
261 sfc_vdpa_log_init(sva, "init nic");
262 rc = efx_nic_init(sva->nic);
264 sfc_vdpa_err(sva, "nic init failed: %s", rte_strerror(rc));
268 /* Find resource dimensions assigned by firmware to this function */
269 rc = efx_nic_get_vi_pool(sva->nic, &evq_allocated, &rxq_allocated,
272 sfc_vdpa_err(sva, "vi pool get failed: %s", rte_strerror(rc));
273 goto fail_get_vi_pool;
276 /* It still may allocate more than maximum, ensure limit */
277 evq_allocated = RTE_MIN(evq_allocated, limits.edl_max_evq_count);
278 rxq_allocated = RTE_MIN(rxq_allocated, limits.edl_max_rxq_count);
279 txq_allocated = RTE_MIN(txq_allocated, limits.edl_max_txq_count);
282 max_queue_cnt = RTE_MIN(rxq_allocated, txq_allocated);
283 /* Subtract management EVQ not used for traffic */
284 max_queue_cnt = RTE_MIN(evq_allocated - 1, max_queue_cnt);
286 SFC_VDPA_ASSERT(max_queue_cnt > 0);
288 sva->max_queue_count = max_queue_cnt;
293 efx_nic_fini(sva->nic);
295 sfc_vdpa_log_init(sva, "failed: %s", rte_strerror(rc));
300 sfc_vdpa_hw_init(struct sfc_vdpa_adapter *sva)
302 efx_bar_region_t mem_ebr;
306 sfc_vdpa_log_init(sva, "entry");
308 sfc_vdpa_log_init(sva, "get family");
309 rc = sfc_efx_family(sva->pdev, &mem_ebr, &sva->family);
312 sfc_vdpa_log_init(sva,
313 "family is %u, membar is %d,"
314 "function control window offset is %#" PRIx64,
315 sva->family, mem_ebr.ebr_index, mem_ebr.ebr_offset);
317 sfc_vdpa_log_init(sva, "init mem bar");
318 rc = sfc_vdpa_mem_bar_init(sva, &mem_ebr);
320 goto fail_mem_bar_init;
322 sfc_vdpa_log_init(sva, "create nic");
323 rte_spinlock_init(&sva->nic_lock);
324 rc = efx_nic_create(sva->family, (efsys_identifier_t *)sva,
325 &sva->mem_bar, mem_ebr.ebr_offset,
326 &sva->nic_lock, &enp);
328 sfc_vdpa_err(sva, "nic create failed: %s", rte_strerror(rc));
329 goto fail_nic_create;
333 sfc_vdpa_log_init(sva, "init mcdi");
334 rc = sfc_vdpa_mcdi_init(sva);
336 sfc_vdpa_err(sva, "mcdi init failed: %s", rte_strerror(rc));
340 sfc_vdpa_log_init(sva, "probe nic");
341 rc = sfc_vdpa_nic_probe(sva);
345 sfc_vdpa_log_init(sva, "reset nic");
346 rc = efx_nic_reset(enp);
348 sfc_vdpa_err(sva, "nic reset failed: %s", rte_strerror(rc));
352 sfc_vdpa_log_init(sva, "estimate resource limits");
353 rc = sfc_vdpa_estimate_resource_limits(sva);
355 goto fail_estimate_rsrc_limits;
357 sfc_vdpa_log_init(sva, "init virtio");
358 rc = efx_virtio_init(enp);
360 sfc_vdpa_err(sva, "virtio init failed: %s", rte_strerror(rc));
361 goto fail_virtio_init;
364 sfc_vdpa_log_init(sva, "init filter");
365 rc = efx_filter_init(enp);
367 sfc_vdpa_err(sva, "filter init failed: %s", rte_strerror(rc));
368 goto fail_filter_init;
371 sfc_vdpa_log_init(sva, "done");
376 efx_virtio_fini(enp);
381 fail_estimate_rsrc_limits:
383 efx_nic_unprobe(enp);
386 sfc_vdpa_mcdi_fini(sva);
389 sfc_vdpa_log_init(sva, "destroy nic");
391 efx_nic_destroy(enp);
394 sfc_vdpa_mem_bar_fini(sva);
398 sfc_vdpa_log_init(sva, "failed: %s", rte_strerror(rc));
403 sfc_vdpa_hw_fini(struct sfc_vdpa_adapter *sva)
405 efx_nic_t *enp = sva->nic;
407 sfc_vdpa_log_init(sva, "entry");
409 sfc_vdpa_log_init(sva, "virtio fini");
410 efx_virtio_fini(enp);
412 sfc_vdpa_log_init(sva, "unprobe nic");
413 efx_nic_unprobe(enp);
415 sfc_vdpa_log_init(sva, "mcdi fini");
416 sfc_vdpa_mcdi_fini(sva);
418 sfc_vdpa_log_init(sva, "nic fini");
421 sfc_vdpa_log_init(sva, "destroy nic");
423 efx_nic_destroy(enp);
425 sfc_vdpa_mem_bar_fini(sva);