vdpa/mlx5: support features get operations
drivers/vdpa/mlx5/mlx5_vdpa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <linux/virtio_net.h>
#include <pthread.h>

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_bus_pci.h>
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_vdpa.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_vdpa_utils.h"
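
/*
 * Driver-private data of a single vDPA device: the global list entry, the id
 * assigned by the vDPA framework, the Verbs device context and the vDPA
 * capabilities read from the HCA.
 */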
struct mlx5_vdpa_priv {
        TAILQ_ENTRY(mlx5_vdpa_priv) next;
        int id; /* vDPA device id. */
        struct ibv_context *ctx; /* Device context. */
        struct rte_vdpa_dev_addr dev_addr;
        struct mlx5_hca_vdpa_attr caps;
};
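
/*
 * Feature bits that may be missing from older kernel virtio headers;
 * define them locally when absent.
 */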
#ifndef VIRTIO_F_ORDER_PLATFORM
#define VIRTIO_F_ORDER_PLATFORM 36
#endif

#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
#endif
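
/* Virtio and vhost-user feature bits the driver always reports. */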
#define MLX5_VDPA_DEFAULT_FEATURES ((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
                            (1ULL << VIRTIO_F_ANY_LAYOUT) | \
                            (1ULL << VIRTIO_NET_F_MQ) | \
                            (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
                            (1ULL << VIRTIO_F_ORDER_PLATFORM))
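
/* vhost-user protocol features supported by the driver. */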
#define MLX5_VDPA_PROTOCOL_FEATURES \
                            ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_MQ))
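
/* Global list of probed devices and the lock protecting it. */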
TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
                                              TAILQ_HEAD_INITIALIZER(priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
int mlx5_vdpa_logtype;
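
/*
 * Look up a device private structure by its vDPA device id.
 * Returns NULL and sets rte_errno when the id is unknown.
 */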
static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_did(int did)
{
        struct mlx5_vdpa_priv *priv;
        int found = 0;

        pthread_mutex_lock(&priv_list_lock);
        TAILQ_FOREACH(priv, &priv_list, next) {
                if (did == priv->id) {
                        found = 1;
                        break;
                }
        }
        pthread_mutex_unlock(&priv_list_lock);
        if (!found) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                rte_errno = EINVAL;
                return NULL;
        }
        return priv;
}
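
/* vDPA framework callback: report the maximum number of virtqueues. */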
static int
mlx5_vdpa_get_queue_num(int did, uint32_t *queue_num)
{
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -1;
        }
        *queue_num = priv->caps.max_num_virtio_queues;
        return 0;
}
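
/*
 * vDPA framework callback: build the virtio feature set from the default
 * features plus whatever the HCA capabilities allow (packed ring, TSO,
 * checksum offloads, virtio 1.0).
 */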
static int
mlx5_vdpa_get_vdpa_features(int did, uint64_t *features)
{
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -1;
        }
        *features = MLX5_VDPA_DEFAULT_FEATURES;
        if (priv->caps.virtio_queue_type & (1 << MLX5_VIRTQ_TYPE_PACKED))
                *features |= (1ULL << VIRTIO_F_RING_PACKED);
        if (priv->caps.tso_ipv4)
                *features |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
        if (priv->caps.tso_ipv6)
                *features |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
        if (priv->caps.tx_csum)
                *features |= (1ULL << VIRTIO_NET_F_CSUM);
        if (priv->caps.rx_csum)
                *features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
        if (priv->caps.virtio_version_1_0)
                *features |= (1ULL << VIRTIO_F_VERSION_1);
        return 0;
}
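
/* vDPA framework callback: report the supported vhost-user protocol features. */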
static int
mlx5_vdpa_get_protocol_features(int did, uint64_t *features)
{
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -1;
        }
        *features = MLX5_VDPA_PROTOCOL_FEATURES;
        return 0;
}
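
/*
 * vDPA driver operations. Only the capability/feature query callbacks are
 * implemented at this stage; the remaining callbacks are left NULL.
 */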
static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
        .get_queue_num = mlx5_vdpa_get_queue_num,
        .get_features = mlx5_vdpa_get_vdpa_features,
        .get_protocol_features = mlx5_vdpa_get_protocol_features,
        .dev_conf = NULL,
        .dev_close = NULL,
        .set_vring_state = NULL,
        .set_features = NULL,
        .migration_done = NULL,
        .get_vfio_group_fd = NULL,
        .get_vfio_device_fd = NULL,
        .get_notify_area = NULL,
};

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns a vDPA device out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_vdpa_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, 1 to skip this driver, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                    struct rte_pci_device *pci_dev __rte_unused)
{
        struct ibv_device **ibv_list;
        struct ibv_device *ibv_match = NULL;
        struct mlx5_vdpa_priv *priv = NULL;
        struct ibv_context *ctx = NULL;
        struct mlx5_hca_attr attr;
        int ret;

        if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_VDPA) {
                DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5"
                        " driver.");
                return 1;
        }
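        /* Find the Verbs (IB) device that matches the probed PCI address. */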
        errno = 0;
        ibv_list = mlx5_glue->get_device_list(&ret);
        if (!ibv_list) {
                rte_errno = ENOSYS;
                DRV_LOG(ERR, "Failed to get device list, is ib_uverbs loaded?");
                return -rte_errno;
        }
        while (ret-- > 0) {
                struct rte_pci_addr pci_addr;

                DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[ret]->name);
                if (mlx5_dev_to_pci_addr(ibv_list[ret]->ibdev_path, &pci_addr))
                        continue;
                if (pci_dev->addr.domain != pci_addr.domain ||
                    pci_dev->addr.bus != pci_addr.bus ||
                    pci_dev->addr.devid != pci_addr.devid ||
                    pci_dev->addr.function != pci_addr.function)
                        continue;
                DRV_LOG(INFO, "PCI information matches for device \"%s\".",
                        ibv_list[ret]->name);
                ibv_match = ibv_list[ret];
                break;
        }
        mlx5_glue->free_device_list(ibv_list);
        if (!ibv_match) {
                DRV_LOG(ERR, "No matching IB device for PCI slot "
                        "%" PRIx32 ":%" PRIx8 ":%" PRIx8 ".%" PRIx8 ".",
                        pci_dev->addr.domain, pci_dev->addr.bus,
                        pci_dev->addr.devid, pci_dev->addr.function);
                rte_errno = ENOENT;
                return -rte_errno;
        }
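        /*
         * Open a device context and make sure the HCA reports the vDPA
         * capabilities this driver relies on.
         */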
        ctx = mlx5_glue->dv_open_device(ibv_match);
        if (!ctx) {
                DRV_LOG(ERR, "Failed to open IB device \"%s\".",
                        ibv_match->name);
                rte_errno = ENODEV;
                return -rte_errno;
        }
        priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
                           RTE_CACHE_LINE_SIZE);
        if (!priv) {
                DRV_LOG(ERR, "Failed to allocate private memory.");
                rte_errno = ENOMEM;
                goto error;
        }
        ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
        if (ret) {
                DRV_LOG(ERR, "Unable to read HCA capabilities.");
                rte_errno = ENOTSUP;
                goto error;
        } else {
                if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
                        DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
                                " maybe old FW/OFED version?");
                        rte_errno = ENOTSUP;
                        goto error;
                }
                priv->caps = attr.vdpa;
        }
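        /* Register with the vDPA framework and add to the global list. */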
        priv->ctx = ctx;
        priv->dev_addr.pci_addr = pci_dev->addr;
        priv->dev_addr.type = PCI_ADDR;
        priv->id = rte_vdpa_register_device(&priv->dev_addr, &mlx5_vdpa_ops);
        if (priv->id < 0) {
                DRV_LOG(ERR, "Failed to register vDPA device.");
                rte_errno = rte_errno ? rte_errno : EINVAL;
                goto error;
        }
        pthread_mutex_lock(&priv_list_lock);
        TAILQ_INSERT_TAIL(&priv_list, priv, next);
        pthread_mutex_unlock(&priv_list_lock);
        return 0;

error:
        if (priv)
                rte_free(priv);
        if (ctx)
                mlx5_glue->close_device(ctx);
        return -rte_errno;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all vDPA devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
{
        struct mlx5_vdpa_priv *priv = NULL;
        int found = 0;

        pthread_mutex_lock(&priv_list_lock);
        TAILQ_FOREACH(priv, &priv_list, next) {
                if (memcmp(&priv->dev_addr.pci_addr, &pci_dev->addr,
                           sizeof(pci_dev->addr)) == 0) {
                        found = 1;
                        break;
                }
        }
        if (found) {
                TAILQ_REMOVE(&priv_list, priv, next);
                mlx5_glue->close_device(priv->ctx);
                rte_free(priv);
        }
        pthread_mutex_unlock(&priv_list_lock);
        return 0;
}
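
/*
 * PCI IDs of the NICs supported by this driver: BlueField, ConnectX-6,
 * ConnectX-6 Dx and their virtual functions.
 */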
static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX6)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
        },
        {
                .vendor_id = 0
        }
};
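
/* PCI driver structure hooking this vDPA driver into the PCI bus. */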
static struct rte_pci_driver mlx5_vdpa_driver = {
        .driver = {
                .name = "mlx5_vdpa",
        },
        .id_table = mlx5_vdpa_pci_id_map,
        .probe = mlx5_vdpa_pci_probe,
        .remove = mlx5_vdpa_pci_remove,
        .drv_flags = 0,
};

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_vdpa_init)
{
        /* Initialize common log type. */
        mlx5_vdpa_logtype = rte_log_register("pmd.vdpa.mlx5");
        if (mlx5_vdpa_logtype >= 0)
                rte_log_set_level(mlx5_vdpa_logtype, RTE_LOG_NOTICE);
        if (mlx5_glue)
                rte_pci_register(&mlx5_vdpa_driver);
}
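
/* Export the driver name, its PCI ID table and kernel module dependencies. */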
RTE_PMD_EXPORT_NAME(net_mlx5_vdpa, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5_vdpa, mlx5_vdpa_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5_vdpa, "* ib_uverbs & mlx5_core & mlx5_ib");