/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>
#include <rte_vfio.h>
#include <rte_vhost.h>

#include "efx.h"
#include "sfc_efx.h"
#include "sfc_vdpa.h"
/* List of all vDPA adapters probed by this driver in this process. */
TAILQ_HEAD(sfc_vdpa_adapter_list_head, sfc_vdpa_adapter);
static struct sfc_vdpa_adapter_list_head sfc_vdpa_adapter_list =
	TAILQ_HEAD_INITIALIZER(sfc_vdpa_adapter_list);

/* Serializes every traversal and mutation of sfc_vdpa_adapter_list. */
static pthread_mutex_t sfc_vdpa_adapter_list_lock = PTHREAD_MUTEX_INITIALIZER;
27 struct sfc_vdpa_adapter *
28 sfc_vdpa_get_adapter_by_dev(struct rte_pci_device *pdev)
31 struct sfc_vdpa_adapter *sva;
33 pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);
35 TAILQ_FOREACH(sva, &sfc_vdpa_adapter_list, next) {
36 if (pdev == sva->pdev) {
42 pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);
44 return found ? sva : NULL;
47 struct sfc_vdpa_ops_data *
48 sfc_vdpa_get_data_by_dev(struct rte_vdpa_device *vdpa_dev)
51 struct sfc_vdpa_adapter *sva;
53 pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);
55 TAILQ_FOREACH(sva, &sfc_vdpa_adapter_list, next) {
56 if (vdpa_dev == sva->ops_data->vdpa_dev) {
62 pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);
64 return found ? sva->ops_data : NULL;
68 sfc_vdpa_vfio_setup(struct sfc_vdpa_adapter *sva)
70 struct rte_pci_device *dev = sva->pdev;
71 char dev_name[RTE_DEV_NAME_MAX_LEN] = {0};
74 rte_pci_device_name(&dev->addr, dev_name, RTE_DEV_NAME_MAX_LEN);
76 sva->vfio_container_fd = rte_vfio_container_create();
77 if (sva->vfio_container_fd < 0) {
78 sfc_vdpa_err(sva, "failed to create VFIO container");
79 goto fail_container_create;
82 rc = rte_vfio_get_group_num(rte_pci_get_sysfs_path(), dev_name,
83 &sva->iommu_group_num);
85 sfc_vdpa_err(sva, "failed to get IOMMU group for %s : %s",
86 dev_name, rte_strerror(-rc));
87 goto fail_get_group_num;
91 rte_vfio_container_group_bind(sva->vfio_container_fd,
92 sva->iommu_group_num);
93 if (sva->vfio_group_fd < 0) {
95 "failed to bind IOMMU group %d to container %d",
96 sva->iommu_group_num, sva->vfio_container_fd);
100 if (rte_pci_map_device(dev) != 0) {
101 sfc_vdpa_err(sva, "failed to map PCI device %s : %s",
102 dev_name, rte_strerror(rte_errno));
103 goto fail_pci_map_device;
106 sva->vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
111 if (rte_vfio_container_group_unbind(sva->vfio_container_fd,
112 sva->iommu_group_num) != 0) {
114 "failed to unbind IOMMU group %d from container %d",
115 sva->iommu_group_num, sva->vfio_container_fd);
120 if (rte_vfio_container_destroy(sva->vfio_container_fd) != 0) {
121 sfc_vdpa_err(sva, "failed to destroy container %d",
122 sva->vfio_container_fd);
125 fail_container_create:
130 sfc_vdpa_vfio_teardown(struct sfc_vdpa_adapter *sva)
132 rte_pci_unmap_device(sva->pdev);
134 if (rte_vfio_container_group_unbind(sva->vfio_container_fd,
135 sva->iommu_group_num) != 0) {
137 "failed to unbind IOMMU group %d from container %d",
138 sva->iommu_group_num, sva->vfio_container_fd);
141 if (rte_vfio_container_destroy(sva->vfio_container_fd) != 0) {
143 "failed to destroy container %d",
144 sva->vfio_container_fd);
149 sfc_vdpa_set_log_prefix(struct sfc_vdpa_adapter *sva)
151 struct rte_pci_device *pci_dev = sva->pdev;
154 ret = snprintf(sva->log_prefix, sizeof(sva->log_prefix),
155 "PMD: sfc_vdpa " PCI_PRI_FMT " : ",
156 pci_dev->addr.domain, pci_dev->addr.bus,
157 pci_dev->addr.devid, pci_dev->addr.function);
159 if (ret < 0 || ret >= (int)sizeof(sva->log_prefix)) {
160 SFC_VDPA_GENERIC_LOG(ERR,
161 "reserved log prefix is too short for " PCI_PRI_FMT,
162 pci_dev->addr.domain, pci_dev->addr.bus,
163 pci_dev->addr.devid, pci_dev->addr.function);
171 sfc_vdpa_register_logtype(const struct rte_pci_addr *pci_addr,
172 const char *lt_prefix_str, uint32_t ll_default)
174 size_t lt_prefix_str_size = strlen(lt_prefix_str);
175 size_t lt_str_size_max;
179 if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
180 ++lt_prefix_str_size; /* Reserve space for prefix separator */
181 lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
183 return RTE_LOGTYPE_PMD;
186 lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
188 return RTE_LOGTYPE_PMD;
190 strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
191 lt_str[lt_prefix_str_size - 1] = '.';
192 rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
193 lt_str_size_max - lt_prefix_str_size);
194 lt_str[lt_str_size_max - 1] = '\0';
196 ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
199 return ret < 0 ? RTE_LOGTYPE_PMD : ret;
203 sfc_vdpa_kvargs_parse(struct sfc_vdpa_adapter *sva)
205 struct rte_pci_device *pci_dev = sva->pdev;
206 struct rte_devargs *devargs = pci_dev->device.devargs;
208 * To get the device class a mandatory param 'class' is being
209 * used so included SFC_EFX_KVARG_DEV_CLASS in the param list.
211 const char **params = (const char *[]){
212 RTE_DEVARGS_KEY_CLASS,
220 sva->kvargs = rte_kvargs_parse(devargs->args, params);
221 if (sva->kvargs == NULL)
227 static struct rte_pci_id pci_id_sfc_vdpa_efx_map[] = {
228 { RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD_VF) },
229 { .vendor_id = 0, /* sentinel */ },
233 sfc_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
234 struct rte_pci_device *pci_dev)
236 struct sfc_vdpa_adapter *sva = NULL;
237 uint32_t logtype_main;
240 if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
241 SFC_EFX_DEV_CLASS_VDPA) {
242 SFC_VDPA_GENERIC_LOG(INFO,
243 "Incompatible device class: skip probing, should be probed by other sfc driver.");
248 * It will not be probed in the secondary process. As device class
249 * is vdpa so return 0 to avoid probe by other sfc driver
251 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
254 logtype_main = sfc_vdpa_register_logtype(&pci_dev->addr,
255 SFC_VDPA_LOGTYPE_MAIN_STR,
258 sva = rte_zmalloc("sfc_vdpa", sizeof(struct sfc_vdpa_adapter), 0);
263 sva->logtype_main = logtype_main;
265 ret = sfc_vdpa_set_log_prefix(sva);
267 goto fail_set_log_prefix;
269 ret = sfc_vdpa_kvargs_parse(sva);
271 goto fail_kvargs_parse;
273 sfc_vdpa_log_init(sva, "entry");
275 sfc_vdpa_adapter_lock_init(sva);
277 sfc_vdpa_log_init(sva, "vfio init");
278 if (sfc_vdpa_vfio_setup(sva) < 0) {
279 sfc_vdpa_err(sva, "failed to setup device %s", pci_dev->name);
280 goto fail_vfio_setup;
283 sfc_vdpa_log_init(sva, "hw init");
284 if (sfc_vdpa_hw_init(sva) != 0) {
285 sfc_vdpa_err(sva, "failed to init HW %s", pci_dev->name);
289 sfc_vdpa_log_init(sva, "dev init");
290 sva->ops_data = sfc_vdpa_device_init(sva, SFC_VDPA_AS_VF);
291 if (sva->ops_data == NULL) {
292 sfc_vdpa_err(sva, "failed vDPA dev init %s", pci_dev->name);
296 pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);
297 TAILQ_INSERT_TAIL(&sfc_vdpa_adapter_list, sva, next);
298 pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);
300 sfc_vdpa_log_init(sva, "done");
305 sfc_vdpa_hw_fini(sva);
308 sfc_vdpa_vfio_teardown(sva);
311 sfc_vdpa_adapter_lock_fini(sva);
322 sfc_vdpa_pci_remove(struct rte_pci_device *pci_dev)
324 struct sfc_vdpa_adapter *sva = NULL;
326 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
329 sva = sfc_vdpa_get_adapter_by_dev(pci_dev);
331 SFC_VDPA_GENERIC_LOG(INFO,
332 "Invalid device: %s.", pci_dev->name);
336 pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);
337 TAILQ_REMOVE(&sfc_vdpa_adapter_list, sva, next);
338 pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);
340 sfc_vdpa_device_fini(sva->ops_data);
342 sfc_vdpa_hw_fini(sva);
344 sfc_vdpa_vfio_teardown(sva);
346 sfc_vdpa_adapter_lock_fini(sva);
353 static struct rte_pci_driver rte_sfc_vdpa = {
354 .id_table = pci_id_sfc_vdpa_efx_map,
356 .probe = sfc_vdpa_pci_probe,
357 .remove = sfc_vdpa_pci_remove,
/* Register the driver with the PCI bus and export its device ID table. */
RTE_PMD_REGISTER_PCI(net_sfc_vdpa, rte_sfc_vdpa);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_vdpa, pci_id_sfc_vdpa_efx_map);
/* Devices must be bound to vfio-pci for this driver to work. */
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_vdpa, "* vfio-pci");
/* Driver-level logtype: pmd.vdpa.sfc.driver, default level NOTICE. */
RTE_LOG_REGISTER_SUFFIX(sfc_vdpa_logtype_driver, driver, NOTICE);