vdpa/mlx5: prepare memory regions
[dpdk.git] drivers/vdpa/mlx5/mlx5_vdpa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <linux/virtio_net.h>

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_bus_pci.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#ifndef VIRTIO_F_ORDER_PLATFORM
#define VIRTIO_F_ORDER_PLATFORM 36
#endif

#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
#endif

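/*
 * MLX5_VDPA_DEFAULT_FEATURES is the set of virtio/vhost feature bits the
 * driver always offers; mlx5_vdpa_get_vdpa_features() below adds the packed
 * ring, TSO, checksum and VERSION_1 bits on top of it only when the matching
 * HCA capability is reported. MLX5_VDPA_PROTOCOL_FEATURES is the vhost-user
 * protocol feature set returned by mlx5_vdpa_get_protocol_features().
 */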
#define MLX5_VDPA_DEFAULT_FEATURES ((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
			    (1ULL << VIRTIO_F_ANY_LAYOUT) | \
			    (1ULL << VIRTIO_NET_F_MQ) | \
			    (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
			    (1ULL << VIRTIO_F_ORDER_PLATFORM))

#define MLX5_VDPA_PROTOCOL_FEATURES \
			    ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_MQ))

TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
					      TAILQ_HEAD_INITIALIZER(priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
int mlx5_vdpa_logtype;

static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_did(int did)
{
	struct mlx5_vdpa_priv *priv;
	int found = 0;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &priv_list, next) {
		if (did == priv->id) {
			found = 1;
			break;
		}
	}
	pthread_mutex_unlock(&priv_list_lock);
	if (!found) {
		DRV_LOG(ERR, "Invalid device id: %d.", did);
		rte_errno = EINVAL;
		return NULL;
	}
	return priv;
}

static int
mlx5_vdpa_get_queue_num(int did, uint32_t *queue_num)
{
	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d.", did);
		return -1;
	}
	*queue_num = priv->caps.max_num_virtio_queues;
	return 0;
}

static int
mlx5_vdpa_get_vdpa_features(int did, uint64_t *features)
{
	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d.", did);
		return -1;
	}
	*features = MLX5_VDPA_DEFAULT_FEATURES;
	if (priv->caps.virtio_queue_type & (1 << MLX5_VIRTQ_TYPE_PACKED))
		*features |= (1ULL << VIRTIO_F_RING_PACKED);
	if (priv->caps.tso_ipv4)
		*features |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
	if (priv->caps.tso_ipv6)
		*features |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
	if (priv->caps.tx_csum)
		*features |= (1ULL << VIRTIO_NET_F_CSUM);
	if (priv->caps.rx_csum)
		*features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
	if (priv->caps.virtio_version_1_0)
		*features |= (1ULL << VIRTIO_F_VERSION_1);
	return 0;
}

static int
mlx5_vdpa_get_protocol_features(int did, uint64_t *features)
{
	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d.", did);
		return -1;
	}
	*features = MLX5_VDPA_PROTOCOL_FEATURES;
	return 0;
}

static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
	.get_queue_num = mlx5_vdpa_get_queue_num,
	.get_features = mlx5_vdpa_get_vdpa_features,
	.get_protocol_features = mlx5_vdpa_get_protocol_features,
	.dev_conf = NULL,
	.dev_close = NULL,
	.set_vring_state = NULL,
	.set_features = NULL,
	.migration_done = NULL,
	.get_vfio_group_fd = NULL,
	.get_vfio_device_fd = NULL,
	.get_notify_area = NULL,
};
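/*
 * Only the query callbacks are implemented at this stage; the remaining
 * rte_vdpa_dev_ops entries are still NULL here. A vhost-user application
 * would typically bind the device registered with these ops to a socket
 * roughly as sketched below (a minimal sketch assuming the rte_vhost/rte_vdpa
 * API of this DPDK release; the socket path and the dev_pci_addr variable are
 * hypothetical):
 *
 *	struct rte_vdpa_dev_addr addr = {
 *		.type = PCI_ADDR,
 *		.pci_addr = dev_pci_addr,
 *	};
 *	int did = rte_vdpa_find_device_id(&addr);
 *
 *	rte_vhost_driver_register("/tmp/vdpa.sock", RTE_VHOST_USER_CLIENT);
 *	rte_vhost_driver_attach_vdpa_device("/tmp/vdpa.sock", did);
 *	rte_vhost_driver_start("/tmp/vdpa.sock");
 */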

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns a vDPA device out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_vdpa_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, 1 to skip this driver, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev __rte_unused)
{
	struct ibv_device **ibv_list;
	struct ibv_device *ibv_match = NULL;
	struct mlx5_vdpa_priv *priv = NULL;
	struct ibv_context *ctx = NULL;
	struct mlx5_hca_attr attr;
	int ret;

	if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_VDPA) {
		DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5"
			" driver.");
		return 1;
	}
	errno = 0;
	ibv_list = mlx5_glue->get_device_list(&ret);
	if (!ibv_list) {
		rte_errno = ENOSYS;
		DRV_LOG(ERR, "Failed to get device list, is ib_uverbs loaded?");
		return -rte_errno;
	}
	while (ret-- > 0) {
		struct rte_pci_addr pci_addr;

		DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[ret]->name);
		if (mlx5_dev_to_pci_addr(ibv_list[ret]->ibdev_path, &pci_addr))
			continue;
		if (pci_dev->addr.domain != pci_addr.domain ||
		    pci_dev->addr.bus != pci_addr.bus ||
		    pci_dev->addr.devid != pci_addr.devid ||
		    pci_dev->addr.function != pci_addr.function)
			continue;
		DRV_LOG(INFO, "PCI information matches for device \"%s\".",
			ibv_list[ret]->name);
		ibv_match = ibv_list[ret];
		break;
	}
	mlx5_glue->free_device_list(ibv_list);
	if (!ibv_match) {
		DRV_LOG(ERR, "No matching IB device for PCI slot "
			"%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 ".",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		rte_errno = ENOENT;
		return -rte_errno;
	}
	ctx = mlx5_glue->dv_open_device(ibv_match);
	if (!ctx) {
		DRV_LOG(ERR, "Failed to open IB device \"%s\".",
			ibv_match->name);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
			   RTE_CACHE_LINE_SIZE);
	if (!priv) {
		DRV_LOG(ERR, "Failed to allocate private memory.");
		rte_errno = ENOMEM;
		goto error;
	}
	ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
	if (ret) {
		DRV_LOG(ERR, "Unable to read HCA capabilities.");
		rte_errno = ENOTSUP;
		goto error;
	} else {
		if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
			DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
				" maybe old FW/OFED version?");
			rte_errno = ENOTSUP;
			goto error;
		}
		priv->caps = attr.vdpa;
	}
	priv->ctx = ctx;
	priv->dev_addr.pci_addr = pci_dev->addr;
	priv->dev_addr.type = PCI_ADDR;
	priv->id = rte_vdpa_register_device(&priv->dev_addr, &mlx5_vdpa_ops);
	if (priv->id < 0) {
		DRV_LOG(ERR, "Failed to register vDPA device.");
		rte_errno = rte_errno ? rte_errno : EINVAL;
		goto error;
	}
	SLIST_INIT(&priv->mr_list);
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;

error:
	if (priv)
		rte_free(priv);
	if (ctx)
		mlx5_glue->close_device(ctx);
	return -rte_errno;
}
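/*
 * Note: mlx5_class_get() above leaves the device to the mlx5 net PMD unless
 * the vDPA class is requested in the device devargs, e.g. (assumed devargs
 * syntax for this release):
 *
 *	-w <PCI_BDF>,class=vdpa
 */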

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all vDPA devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
{
	struct mlx5_vdpa_priv *priv = NULL;
	int found = 0;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &priv_list, next) {
		if (memcmp(&priv->dev_addr.pci_addr, &pci_dev->addr,
			   sizeof(pci_dev->addr)) == 0) {
			found = 1;
			break;
		}
	}
	if (found) {
		TAILQ_REMOVE(&priv_list, priv, next);
		mlx5_glue->close_device(priv->ctx);
		rte_free(priv);
	}
	pthread_mutex_unlock(&priv_list_lock);
	return 0;
}

static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx5_vdpa_driver = {
	.driver = {
		.name = "mlx5_vdpa",
	},
	.id_table = mlx5_vdpa_pci_id_map,
	.probe = mlx5_vdpa_pci_probe,
	.remove = mlx5_vdpa_pci_remove,
	.drv_flags = 0,
};

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_vdpa_init)
{
	/* Initialize common log type. */
	mlx5_vdpa_logtype = rte_log_register("pmd.vdpa.mlx5");
	if (mlx5_vdpa_logtype >= 0)
		rte_log_set_level(mlx5_vdpa_logtype, RTE_LOG_NOTICE);
	if (mlx5_glue)
		rte_pci_register(&mlx5_vdpa_driver);
}
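/*
 * The "pmd.vdpa.mlx5" log type registered above can be tuned at runtime with
 * the usual EAL option, e.g. (assumed EAL syntax):
 *
 *	--log-level=pmd.vdpa.mlx5:debug
 */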

RTE_PMD_EXPORT_NAME(net_mlx5_vdpa, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5_vdpa, mlx5_vdpa_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5_vdpa, "* ib_uverbs & mlx5_core & mlx5_ib");