1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020-2021 Xilinx, Inc.
9 #include <rte_common.h>
10 #include <rte_errno.h>
11 #include <rte_string_fns.h>
13 #include <rte_vhost.h>
/* Global list of all probed sfc vDPA adapters, linked via their 'next' field. */
19 TAILQ_HEAD(sfc_vdpa_adapter_list_head, sfc_vdpa_adapter);
20 static struct sfc_vdpa_adapter_list_head sfc_vdpa_adapter_list =
21 TAILQ_HEAD_INITIALIZER(sfc_vdpa_adapter_list);
/* Serializes every access (lookup/insert/remove) to sfc_vdpa_adapter_list. */
23 static pthread_mutex_t sfc_vdpa_adapter_list_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Find the adapter previously registered for the given PCI device.
 *
 * Walks the global adapter list under the list lock and compares by
 * rte_pci_device pointer identity.
 *
 * @return pointer to the matching adapter, or NULL if none is registered.
 */
25 struct sfc_vdpa_adapter *
26 sfc_vdpa_get_adapter_by_dev(struct rte_pci_device *pdev)
29 struct sfc_vdpa_adapter *sva;
31 pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);
33 TAILQ_FOREACH(sva, &sfc_vdpa_adapter_list, next) {
34 if (pdev == sva->pdev) {
/* NOTE(review): 'found' is declared/set in lines elided from this view —
 * presumably set true (with a break) inside the match branch above. */
40 pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);
42 return found ? sva : NULL;
/*
 * Set up VFIO access to the adapter's PCI device.
 *
 * Steps, with goto-based unwind on failure:
 *   1. create a VFIO container (sva->vfio_container_fd);
 *   2. resolve the device's IOMMU group number from sysfs;
 *   3. bind the IOMMU group to the container (group fd assignment is on a
 *      line elided from this view; the result is checked via
 *      sva->vfio_group_fd);
 *   4. map the PCI device and record its VFIO device fd.
 *
 * @return 0-or-negative convention assumed ('rc' declaration and final
 *         returns are elided from this view) — TODO confirm.
 */
46 sfc_vdpa_vfio_setup(struct sfc_vdpa_adapter *sva)
48 struct rte_pci_device *dev = sva->pdev;
49 char dev_name[RTE_DEV_NAME_MAX_LEN] = {0};
/* Render "domain:bus:devid.function" name used for sysfs lookup and logs. */
52 rte_pci_device_name(&dev->addr, dev_name, RTE_DEV_NAME_MAX_LEN);
54 sva->vfio_container_fd = rte_vfio_container_create();
55 if (sva->vfio_container_fd < 0) {
56 sfc_vdpa_err(sva, "failed to create VFIO container");
57 goto fail_container_create;
/* Resolve the IOMMU group number for this device from the PCI sysfs tree. */
60 rc = rte_vfio_get_group_num(rte_pci_get_sysfs_path(), dev_name,
61 &sva->iommu_group_num);
63 sfc_vdpa_err(sva, "failed to get IOMMU group for %s : %s",
64 dev_name, rte_strerror(-rc));
65 goto fail_get_group_num;
/* NOTE(review): the assignment of the returned group fd to
 * sva->vfio_group_fd is on an elided line (orig ~68) — verify upstream. */
69 rte_vfio_container_group_bind(sva->vfio_container_fd,
70 sva->iommu_group_num);
71 if (sva->vfio_group_fd < 0) {
73 "failed to bind IOMMU group %d to container %d",
74 sva->iommu_group_num, sva->vfio_container_fd);
/* Map BARs and set up interrupts; rte_errno carries the failure cause. */
78 if (rte_pci_map_device(dev) != 0) {
79 sfc_vdpa_err(sva, "failed to map PCI device %s : %s",
80 dev_name, rte_strerror(rte_errno));
81 goto fail_pci_map_device;
/* The VFIO device fd doubles as the interrupt handle's fd. */
84 sva->vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
/* Unwind path: unbind group, then destroy container (labels partly elided). */
89 if (rte_vfio_container_group_unbind(sva->vfio_container_fd,
90 sva->iommu_group_num) != 0) {
92 "failed to unbind IOMMU group %d from container %d",
93 sva->iommu_group_num, sva->vfio_container_fd);
98 if (rte_vfio_container_destroy(sva->vfio_container_fd) != 0) {
99 sfc_vdpa_err(sva, "failed to destroy container %d",
100 sva->vfio_container_fd);
103 fail_container_create:
/*
 * Release the VFIO resources acquired by sfc_vdpa_vfio_setup(), in reverse
 * order: unmap the PCI device, unbind the IOMMU group from the container,
 * destroy the container. Failures are logged but do not abort teardown.
 */
108 sfc_vdpa_vfio_teardown(struct sfc_vdpa_adapter *sva)
110 rte_pci_unmap_device(sva->pdev);
112 if (rte_vfio_container_group_unbind(sva->vfio_container_fd,
113 sva->iommu_group_num) != 0) {
115 "failed to unbind IOMMU group %d from container %d",
116 sva->iommu_group_num, sva->vfio_container_fd);
119 if (rte_vfio_container_destroy(sva->vfio_container_fd) != 0) {
121 "failed to destroy container %d",
122 sva->vfio_container_fd);
/*
 * Format the per-adapter log prefix ("PMD: sfc_vdpa <PCI addr> : ") into
 * sva->log_prefix. Logs an error if the buffer would be truncated
 * (snprintf return >= buffer size) or formatting fails (return < 0);
 * the corresponding return statements are elided from this view.
 */
127 sfc_vdpa_set_log_prefix(struct sfc_vdpa_adapter *sva)
129 struct rte_pci_device *pci_dev = sva->pdev;
132 ret = snprintf(sva->log_prefix, sizeof(sva->log_prefix),
133 "PMD: sfc_vdpa " PCI_PRI_FMT " : ",
134 pci_dev->addr.domain, pci_dev->addr.bus,
135 pci_dev->addr.devid, pci_dev->addr.function);
/* Treat both encoding errors and truncation as failure. */
137 if (ret < 0 || ret >= (int)sizeof(sva->log_prefix)) {
138 SFC_VDPA_GENERIC_LOG(ERR,
139 "reserved log prefix is too short for " PCI_PRI_FMT,
140 pci_dev->addr.domain, pci_dev->addr.bus,
141 pci_dev->addr.devid, pci_dev->addr.function);
/*
 * Register a per-device dynamic logtype named "<lt_prefix_str>.<PCI addr>"
 * with the given default level.
 *
 * Falls back to RTE_LOGTYPE_PMD when the prefix length would overflow the
 * size computation, when allocation fails, or when registration fails.
 *
 * @return the registered logtype id, or RTE_LOGTYPE_PMD on any failure.
 */
149 sfc_vdpa_register_logtype(const struct rte_pci_addr *pci_addr,
150 const char *lt_prefix_str, uint32_t ll_default)
152 size_t lt_prefix_str_size = strlen(lt_prefix_str);
153 size_t lt_str_size_max;
/* Guard the size arithmetic below against size_t overflow. */
157 if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
158 ++lt_prefix_str_size; /* Reserve space for prefix separator */
159 lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
161 return RTE_LOGTYPE_PMD;
164 lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
166 return RTE_LOGTYPE_PMD;
/* Build "<prefix>.<PCI addr>"; the '.' replaces the reserved separator slot. */
168 strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
169 lt_str[lt_prefix_str_size - 1] = '.';
170 rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
171 lt_str_size_max - lt_prefix_str_size);
/* Defensive: guarantee NUL termination regardless of the helpers above. */
172 lt_str[lt_str_size_max - 1] = '\0';
174 ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
/* NOTE(review): lt_str is presumably freed on an elided line before return. */
177 return ret < 0 ? RTE_LOGTYPE_PMD : ret;
/* PCI IDs claimed by this driver: Xilinx Riverhead VF only. */
180 static struct rte_pci_id pci_id_sfc_vdpa_efx_map[] = {
181 { RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD_VF) },
182 { .vendor_id = 0, /* sentinel */ },
/*
 * PCI probe callback: bring up one sfc vDPA adapter.
 *
 * Skips devices whose devargs select a different device class, and skips
 * (successfully) in secondary processes. Otherwise it allocates the adapter,
 * sets up logging, VFIO, HW and the vDPA ops, then publishes the adapter on
 * the global list. Failure labels between steps are elided from this view;
 * each step unwinds via goto on error.
 */
186 sfc_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
187 struct rte_pci_device *pci_dev)
189 struct sfc_vdpa_adapter *sva = NULL;
190 uint32_t logtype_main;
/* Only claim the device when its devargs request the vDPA class. */
193 if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
194 SFC_EFX_DEV_CLASS_VDPA) {
195 SFC_VDPA_GENERIC_LOG(INFO,
196 "Incompatible device class: skip probing, should be probed by other sfc driver.");
201 * It will not be probed in the secondary process. As device class
202 * is vdpa so return 0 to avoid probe by other sfc driver
204 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
/* Per-device main logtype, named after the PCI address. */
207 logtype_main = sfc_vdpa_register_logtype(&pci_dev->addr,
208 SFC_VDPA_LOGTYPE_MAIN_STR,
211 sva = rte_zmalloc("sfc_vdpa", sizeof(struct sfc_vdpa_adapter), 0);
216 sva->logtype_main = logtype_main;
218 ret = sfc_vdpa_set_log_prefix(sva);
220 goto fail_set_log_prefix;
222 sfc_vdpa_log_init(sva, "entry");
224 sfc_vdpa_log_init(sva, "vfio init");
225 if (sfc_vdpa_vfio_setup(sva) < 0) {
226 sfc_vdpa_err(sva, "failed to setup device %s", pci_dev->name);
227 goto fail_vfio_setup;
230 sfc_vdpa_log_init(sva, "hw init");
231 if (sfc_vdpa_hw_init(sva) != 0) {
232 sfc_vdpa_err(sva, "failed to init HW %s", pci_dev->name);
236 sfc_vdpa_log_init(sva, "dev init");
237 sva->ops_data = sfc_vdpa_device_init(sva, SFC_VDPA_AS_VF);
238 if (sva->ops_data == NULL) {
239 sfc_vdpa_err(sva, "failed vDPA dev init %s", pci_dev->name);
/* Publish the fully initialized adapter so lookups by PCI device work. */
243 pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);
244 TAILQ_INSERT_TAIL(&sfc_vdpa_adapter_list, sva, next);
245 pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);
247 sfc_vdpa_log_init(sva, "done");
/* Error unwind (labels elided): undo HW init, then VFIO setup. */
252 sfc_vdpa_hw_fini(sva);
255 sfc_vdpa_vfio_teardown(sva);
/*
 * PCI remove callback: tear down the adapter bound to pci_dev.
 *
 * Primary process only. Looks the adapter up by device, unlinks it from the
 * global list, then unwinds init in reverse order: vDPA ops, HW, VFIO.
 * The final free/return lines are elided from this view.
 */
266 sfc_vdpa_pci_remove(struct rte_pci_device *pci_dev)
268 struct sfc_vdpa_adapter *sva = NULL;
270 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
273 sva = sfc_vdpa_get_adapter_by_dev(pci_dev);
275 sfc_vdpa_info(sva, "invalid device: %s", pci_dev->name);
/* Unpublish first so no new lookup can return a half-torn-down adapter. */
279 pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);
280 TAILQ_REMOVE(&sfc_vdpa_adapter_list, sva, next);
281 pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);
283 sfc_vdpa_device_fini(sva->ops_data);
285 sfc_vdpa_hw_fini(sva);
287 sfc_vdpa_vfio_teardown(sva);
/* PCI driver descriptor wiring probe/remove to the ID table above. */
294 static struct rte_pci_driver rte_sfc_vdpa = {
295 .id_table = pci_id_sfc_vdpa_efx_map,
297 .probe = sfc_vdpa_pci_probe,
298 .remove = sfc_vdpa_pci_remove,
/* Register the driver, its PCI ID table, the vfio-pci kmod dependency,
 * and the driver-level logtype with EAL. */
301 RTE_PMD_REGISTER_PCI(net_sfc_vdpa, rte_sfc_vdpa);
302 RTE_PMD_REGISTER_PCI_TABLE(net_sfc_vdpa, pci_id_sfc_vdpa_efx_map);
303 RTE_PMD_REGISTER_KMOD_DEP(net_sfc_vdpa, "* vfio-pci");
304 RTE_LOG_REGISTER_SUFFIX(sfc_vdpa_logtype_driver, driver, NOTICE);