vdpa/sfc: support device initialization
[dpdk.git] drivers/vdpa/sfc/sfc_vdpa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_vfio.h>
#include <rte_vhost.h>

#include "efx.h"
#include "sfc_efx.h"
#include "sfc_vdpa.h"

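/*
 * Global list of adapters created by this driver, protected by
 * sfc_vdpa_adapter_list_lock.
 */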
TAILQ_HEAD(sfc_vdpa_adapter_list_head, sfc_vdpa_adapter);
static struct sfc_vdpa_adapter_list_head sfc_vdpa_adapter_list =
        TAILQ_HEAD_INITIALIZER(sfc_vdpa_adapter_list);

static pthread_mutex_t sfc_vdpa_adapter_list_lock = PTHREAD_MUTEX_INITIALIZER;

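/* Look up the adapter created for the given PCI device, if any. */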
struct sfc_vdpa_adapter *
sfc_vdpa_get_adapter_by_dev(struct rte_pci_device *pdev)
{
        bool found = false;
        struct sfc_vdpa_adapter *sva;

        pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);

        TAILQ_FOREACH(sva, &sfc_vdpa_adapter_list, next) {
                if (pdev == sva->pdev) {
                        found = true;
                        break;
                }
        }

        pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);

        return found ? sva : NULL;
}

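/*
 * Set up VFIO access to the device: create a VFIO container, bind the
 * device's IOMMU group to it and map the PCI device. On success the
 * VFIO device fd is cached for later use.
 */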
static int
sfc_vdpa_vfio_setup(struct sfc_vdpa_adapter *sva)
{
        struct rte_pci_device *dev = sva->pdev;
        char dev_name[RTE_DEV_NAME_MAX_LEN] = {0};
        int rc;

        rte_pci_device_name(&dev->addr, dev_name, RTE_DEV_NAME_MAX_LEN);

        sva->vfio_container_fd = rte_vfio_container_create();
        if (sva->vfio_container_fd < 0) {
                sfc_vdpa_err(sva, "failed to create VFIO container");
                goto fail_container_create;
        }

        rc = rte_vfio_get_group_num(rte_pci_get_sysfs_path(), dev_name,
                                    &sva->iommu_group_num);
        if (rc <= 0) {
                sfc_vdpa_err(sva, "failed to get IOMMU group for %s : %s",
                             dev_name, rte_strerror(-rc));
                goto fail_get_group_num;
        }

        sva->vfio_group_fd =
                rte_vfio_container_group_bind(sva->vfio_container_fd,
                                              sva->iommu_group_num);
        if (sva->vfio_group_fd < 0) {
                sfc_vdpa_err(sva,
                             "failed to bind IOMMU group %d to container %d",
                             sva->iommu_group_num, sva->vfio_container_fd);
                goto fail_group_bind;
        }

        if (rte_pci_map_device(dev) != 0) {
                sfc_vdpa_err(sva, "failed to map PCI device %s : %s",
                             dev_name, rte_strerror(rte_errno));
                goto fail_pci_map_device;
        }

        sva->vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);

        return 0;

fail_pci_map_device:
        if (rte_vfio_container_group_unbind(sva->vfio_container_fd,
                                            sva->iommu_group_num) != 0) {
                sfc_vdpa_err(sva,
                             "failed to unbind IOMMU group %d from container %d",
                             sva->iommu_group_num, sva->vfio_container_fd);
        }

fail_group_bind:
fail_get_group_num:
        if (rte_vfio_container_destroy(sva->vfio_container_fd) != 0) {
                sfc_vdpa_err(sva, "failed to destroy container %d",
                             sva->vfio_container_fd);
        }

fail_container_create:
        return -1;
}

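/* Undo sfc_vdpa_vfio_setup(): unmap the device and release VFIO resources. */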
static void
sfc_vdpa_vfio_teardown(struct sfc_vdpa_adapter *sva)
{
        rte_pci_unmap_device(sva->pdev);

        if (rte_vfio_container_group_unbind(sva->vfio_container_fd,
                                            sva->iommu_group_num) != 0) {
                sfc_vdpa_err(sva,
                             "failed to unbind IOMMU group %d from container %d",
                             sva->iommu_group_num, sva->vfio_container_fd);
        }

        if (rte_vfio_container_destroy(sva->vfio_container_fd) != 0) {
                sfc_vdpa_err(sva,
                             "failed to destroy container %d",
                             sva->vfio_container_fd);
        }
}

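/* Build the per-device log prefix from the device's PCI address. */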
static int
sfc_vdpa_set_log_prefix(struct sfc_vdpa_adapter *sva)
{
        struct rte_pci_device *pci_dev = sva->pdev;
        int ret;

        ret = snprintf(sva->log_prefix, sizeof(sva->log_prefix),
                       "PMD: sfc_vdpa " PCI_PRI_FMT " : ",
                       pci_dev->addr.domain, pci_dev->addr.bus,
                       pci_dev->addr.devid, pci_dev->addr.function);

        if (ret < 0 || ret >= (int)sizeof(sva->log_prefix)) {
                SFC_VDPA_GENERIC_LOG(ERR,
                        "reserved log prefix is too short for " PCI_PRI_FMT,
                        pci_dev->addr.domain, pci_dev->addr.bus,
                        pci_dev->addr.devid, pci_dev->addr.function);
                return -EINVAL;
        }

        return 0;
}

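/*
 * Register a per-device logtype named "<lt_prefix_str>.<PCI address>".
 * Fall back to the generic PMD logtype if the name cannot be composed
 * or registration fails.
 */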
uint32_t
sfc_vdpa_register_logtype(const struct rte_pci_addr *pci_addr,
                          const char *lt_prefix_str, uint32_t ll_default)
{
        size_t lt_prefix_str_size = strlen(lt_prefix_str);
        size_t lt_str_size_max;
        char *lt_str = NULL;
        int ret;

        if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
                ++lt_prefix_str_size; /* Reserve space for prefix separator */
                lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
        } else {
                return RTE_LOGTYPE_PMD;
        }

        lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
        if (lt_str == NULL)
                return RTE_LOGTYPE_PMD;

        strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
        lt_str[lt_prefix_str_size - 1] = '.';
        rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
                            lt_str_size_max - lt_prefix_str_size);
        lt_str[lt_str_size_max - 1] = '\0';

        ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
        rte_free(lt_str);

        return ret < 0 ? RTE_LOGTYPE_PMD : ret;
}

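/* PCI IDs of devices supported by this driver (Xilinx Riverhead VF). */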
static const struct rte_pci_id pci_id_sfc_vdpa_efx_map[] = {
        { RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

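/*
 * PCI probe callback: allocate an adapter for the device, set up VFIO,
 * initialise the hardware and the vDPA device, and add the adapter to
 * the global adapter list.
 */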
static int
sfc_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        struct sfc_vdpa_adapter *sva = NULL;
        uint32_t logtype_main;
        int ret = 0;

        if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
                        SFC_EFX_DEV_CLASS_VDPA) {
                SFC_VDPA_GENERIC_LOG(INFO,
                        "Incompatible device class: skip probing, should be probed by other sfc driver.");
                return 1;
        }

        /*
         * The device is not probed in the secondary process. Since the
         * device class is vDPA, return 0 (rather than a positive value)
         * so that no other sfc driver attempts to probe it either.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        logtype_main = sfc_vdpa_register_logtype(&pci_dev->addr,
                                                 SFC_VDPA_LOGTYPE_MAIN_STR,
                                                 RTE_LOG_NOTICE);

        sva = rte_zmalloc("sfc_vdpa", sizeof(struct sfc_vdpa_adapter), 0);
        if (sva == NULL)
                goto fail_zmalloc;

        sva->pdev = pci_dev;
        sva->logtype_main = logtype_main;

        ret = sfc_vdpa_set_log_prefix(sva);
        if (ret != 0)
                goto fail_set_log_prefix;

        sfc_vdpa_log_init(sva, "entry");

        sfc_vdpa_log_init(sva, "vfio init");
        if (sfc_vdpa_vfio_setup(sva) < 0) {
                sfc_vdpa_err(sva, "failed to setup device %s", pci_dev->name);
                goto fail_vfio_setup;
        }

        sfc_vdpa_log_init(sva, "hw init");
        if (sfc_vdpa_hw_init(sva) != 0) {
                sfc_vdpa_err(sva, "failed to init HW %s", pci_dev->name);
                goto fail_hw_init;
        }

        sfc_vdpa_log_init(sva, "dev init");
        sva->ops_data = sfc_vdpa_device_init(sva, SFC_VDPA_AS_VF);
        if (sva->ops_data == NULL) {
                sfc_vdpa_err(sva, "failed vDPA dev init %s", pci_dev->name);
                goto fail_dev_init;
        }

        pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);
        TAILQ_INSERT_TAIL(&sfc_vdpa_adapter_list, sva, next);
        pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);

        sfc_vdpa_log_init(sva, "done");

        return 0;

fail_dev_init:
        sfc_vdpa_hw_fini(sva);

fail_hw_init:
        sfc_vdpa_vfio_teardown(sva);

fail_vfio_setup:
fail_set_log_prefix:
        rte_free(sva);

fail_zmalloc:
        return -1;
}

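/*
 * PCI remove callback: detach the adapter from the global list and undo
 * the probe steps in reverse order.
 */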
static int
sfc_vdpa_pci_remove(struct rte_pci_device *pci_dev)
{
        struct sfc_vdpa_adapter *sva = NULL;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -1;

        sva = sfc_vdpa_get_adapter_by_dev(pci_dev);
        if (sva == NULL) {
                /* No adapter for this device, so only the generic log is usable */
                SFC_VDPA_GENERIC_LOG(INFO, "Invalid device: %s", pci_dev->name);
                return -1;
        }

        pthread_mutex_lock(&sfc_vdpa_adapter_list_lock);
        TAILQ_REMOVE(&sfc_vdpa_adapter_list, sva, next);
        pthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);

        sfc_vdpa_device_fini(sva->ops_data);

        sfc_vdpa_hw_fini(sva);

        sfc_vdpa_vfio_teardown(sva);

        rte_free(sva);

        return 0;
}

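/*
 * Register the PCI driver as net_sfc_vdpa. Devices handled by it must be
 * bound to the vfio-pci kernel module.
 */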
static struct rte_pci_driver rte_sfc_vdpa = {
        .id_table = pci_id_sfc_vdpa_efx_map,
        .drv_flags = 0,
        .probe = sfc_vdpa_pci_probe,
        .remove = sfc_vdpa_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_sfc_vdpa, rte_sfc_vdpa);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_vdpa, pci_id_sfc_vdpa_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_vdpa, "* vfio-pci");
RTE_LOG_REGISTER_SUFFIX(sfc_vdpa_logtype_driver, driver, NOTICE);