vdpa/mlx5: support stateless offloads
[dpdk.git] / drivers / vdpa / mlx5 / mlx5_vdpa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 #include <rte_malloc.h>
5 #include <rte_log.h>
6 #include <rte_errno.h>
7 #include <rte_bus_pci.h>
8
9 #include <mlx5_glue.h>
10 #include <mlx5_common.h>
11 #include <mlx5_devx_cmds.h>
12 #include <mlx5_prm.h>
13
14 #include "mlx5_vdpa_utils.h"
15 #include "mlx5_vdpa.h"
16
17
/*
 * Virtio/vhost feature bits the driver always advertises; bits that depend
 * on HCA capabilities (TSO, csum, packed ring, ...) are OR-ed in at query
 * time by mlx5_vdpa_get_vdpa_features().
 */
#define MLX5_VDPA_DEFAULT_FEATURES ((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
			    (1ULL << VIRTIO_F_ANY_LAYOUT) | \
			    (1ULL << VIRTIO_NET_F_MQ) | \
			    (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
			    (1ULL << VIRTIO_F_ORDER_PLATFORM))

/* vhost-user protocol features supported by this driver. */
#define MLX5_VDPA_PROTOCOL_FEATURES \
			    ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_MQ))
30
/* List of all probed vDPA devices; protected by priv_list_lock. */
TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
					      TAILQ_HEAD_INITIALIZER(priv_list);
/* Serializes probe/remove/lookup accesses to priv_list. */
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
/* Driver log type, registered in rte_mlx5_vdpa_init(). */
int mlx5_vdpa_logtype;
35
36 static struct mlx5_vdpa_priv *
37 mlx5_vdpa_find_priv_resource_by_did(int did)
38 {
39         struct mlx5_vdpa_priv *priv;
40         int found = 0;
41
42         pthread_mutex_lock(&priv_list_lock);
43         TAILQ_FOREACH(priv, &priv_list, next) {
44                 if (did == priv->id) {
45                         found = 1;
46                         break;
47                 }
48         }
49         pthread_mutex_unlock(&priv_list_lock);
50         if (!found) {
51                 DRV_LOG(ERR, "Invalid device id: %d.", did);
52                 rte_errno = EINVAL;
53                 return NULL;
54         }
55         return priv;
56 }
57
58 static int
59 mlx5_vdpa_get_queue_num(int did, uint32_t *queue_num)
60 {
61         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
62
63         if (priv == NULL) {
64                 DRV_LOG(ERR, "Invalid device id: %d.", did);
65                 return -1;
66         }
67         *queue_num = priv->caps.max_num_virtio_queues;
68         return 0;
69 }
70
71 static int
72 mlx5_vdpa_get_vdpa_features(int did, uint64_t *features)
73 {
74         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
75
76         if (priv == NULL) {
77                 DRV_LOG(ERR, "Invalid device id: %d.", did);
78                 return -1;
79         }
80         *features = MLX5_VDPA_DEFAULT_FEATURES;
81         if (priv->caps.virtio_queue_type & (1 << MLX5_VIRTQ_TYPE_PACKED))
82                 *features |= (1ULL << VIRTIO_F_RING_PACKED);
83         if (priv->caps.tso_ipv4)
84                 *features |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
85         if (priv->caps.tso_ipv6)
86                 *features |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
87         if (priv->caps.tx_csum)
88                 *features |= (1ULL << VIRTIO_NET_F_CSUM);
89         if (priv->caps.rx_csum)
90                 *features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
91         if (priv->caps.virtio_version_1_0)
92                 *features |= (1ULL << VIRTIO_F_VERSION_1);
93         return 0;
94 }
95
96 static int
97 mlx5_vdpa_get_protocol_features(int did, uint64_t *features)
98 {
99         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
100
101         if (priv == NULL) {
102                 DRV_LOG(ERR, "Invalid device id: %d.", did);
103                 return -1;
104         }
105         *features = MLX5_VDPA_PROTOCOL_FEATURES;
106         return 0;
107 }
108
109 static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
110         .get_queue_num = mlx5_vdpa_get_queue_num,
111         .get_features = mlx5_vdpa_get_vdpa_features,
112         .get_protocol_features = mlx5_vdpa_get_protocol_features,
113         .dev_conf = NULL,
114         .dev_close = NULL,
115         .set_vring_state = NULL,
116         .set_features = NULL,
117         .migration_done = NULL,
118         .get_vfio_group_fd = NULL,
119         .get_vfio_device_fd = NULL,
120         .get_notify_area = NULL,
121 };
122
123 /**
124  * DPDK callback to register a PCI device.
125  *
126  * This function spawns vdpa device out of a given PCI device.
127  *
128  * @param[in] pci_drv
129  *   PCI driver structure (mlx5_vpda_driver).
130  * @param[in] pci_dev
131  *   PCI device information.
132  *
133  * @return
134  *   0 on success, 1 to skip this driver, a negative errno value otherwise
135  *   and rte_errno is set.
136  */
137 static int
138 mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
139                     struct rte_pci_device *pci_dev __rte_unused)
140 {
141         struct ibv_device **ibv_list;
142         struct ibv_device *ibv_match = NULL;
143         struct mlx5_vdpa_priv *priv = NULL;
144         struct ibv_context *ctx = NULL;
145         struct mlx5_hca_attr attr;
146         int ret;
147
148         if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_VDPA) {
149                 DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5"
150                         " driver.");
151                 return 1;
152         }
153         errno = 0;
154         ibv_list = mlx5_glue->get_device_list(&ret);
155         if (!ibv_list) {
156                 rte_errno = ENOSYS;
157                 DRV_LOG(ERR, "Failed to get device list, is ib_uverbs loaded?");
158                 return -rte_errno;
159         }
160         while (ret-- > 0) {
161                 struct rte_pci_addr pci_addr;
162
163                 DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[ret]->name);
164                 if (mlx5_dev_to_pci_addr(ibv_list[ret]->ibdev_path, &pci_addr))
165                         continue;
166                 if (pci_dev->addr.domain != pci_addr.domain ||
167                     pci_dev->addr.bus != pci_addr.bus ||
168                     pci_dev->addr.devid != pci_addr.devid ||
169                     pci_dev->addr.function != pci_addr.function)
170                         continue;
171                 DRV_LOG(INFO, "PCI information matches for device \"%s\".",
172                         ibv_list[ret]->name);
173                 ibv_match = ibv_list[ret];
174                 break;
175         }
176         mlx5_glue->free_device_list(ibv_list);
177         if (!ibv_match) {
178                 DRV_LOG(ERR, "No matching IB device for PCI slot "
179                         "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 ".",
180                         pci_dev->addr.domain, pci_dev->addr.bus,
181                         pci_dev->addr.devid, pci_dev->addr.function);
182                 rte_errno = ENOENT;
183                 return -rte_errno;
184         }
185         ctx = mlx5_glue->dv_open_device(ibv_match);
186         if (!ctx) {
187                 DRV_LOG(ERR, "Failed to open IB device \"%s\".",
188                         ibv_match->name);
189                 rte_errno = ENODEV;
190                 return -rte_errno;
191         }
192         priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
193                            RTE_CACHE_LINE_SIZE);
194         if (!priv) {
195                 DRV_LOG(ERR, "Failed to allocate private memory.");
196                 rte_errno = ENOMEM;
197                 goto error;
198         }
199         ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
200         if (ret) {
201                 DRV_LOG(ERR, "Unable to read HCA capabilities.");
202                 rte_errno = ENOTSUP;
203                 goto error;
204         } else {
205                 if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
206                         DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
207                                 " maybe old FW/OFED version?");
208                         rte_errno = ENOTSUP;
209                         goto error;
210                 }
211                 priv->caps = attr.vdpa;
212         }
213         priv->ctx = ctx;
214         priv->dev_addr.pci_addr = pci_dev->addr;
215         priv->dev_addr.type = PCI_ADDR;
216         priv->id = rte_vdpa_register_device(&priv->dev_addr, &mlx5_vdpa_ops);
217         if (priv->id < 0) {
218                 DRV_LOG(ERR, "Failed to register vDPA device.");
219                 rte_errno = rte_errno ? rte_errno : EINVAL;
220                 goto error;
221         }
222         SLIST_INIT(&priv->mr_list);
223         SLIST_INIT(&priv->virtq_list);
224         pthread_mutex_lock(&priv_list_lock);
225         TAILQ_INSERT_TAIL(&priv_list, priv, next);
226         pthread_mutex_unlock(&priv_list_lock);
227         return 0;
228
229 error:
230         if (priv)
231                 rte_free(priv);
232         if (ctx)
233                 mlx5_glue->close_device(ctx);
234         return -rte_errno;
235 }
236
237 /**
238  * DPDK callback to remove a PCI device.
239  *
240  * This function removes all vDPA devices belong to a given PCI device.
241  *
242  * @param[in] pci_dev
243  *   Pointer to the PCI device.
244  *
245  * @return
246  *   0 on success, the function cannot fail.
247  */
248 static int
249 mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
250 {
251         struct mlx5_vdpa_priv *priv = NULL;
252         int found = 0;
253
254         pthread_mutex_lock(&priv_list_lock);
255         TAILQ_FOREACH(priv, &priv_list, next) {
256                 if (memcmp(&priv->dev_addr.pci_addr, &pci_dev->addr,
257                            sizeof(pci_dev->addr)) == 0) {
258                         found = 1;
259                         break;
260                 }
261         }
262         if (found) {
263                 TAILQ_REMOVE(&priv_list, priv, next);
264                 mlx5_glue->close_device(priv->ctx);
265                 rte_free(priv);
266         }
267         pthread_mutex_unlock(&priv_list_lock);
268         return 0;
269 }
270
271 static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
272         {
273                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
274                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
275         },
276         {
277                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
278                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
279         },
280         {
281                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
282                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6)
283         },
284         {
285                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
286                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
287         },
288         {
289                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
290                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
291         },
292         {
293                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
294                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
295         },
296         {
297                 .vendor_id = 0
298         }
299 };
300
301 static struct rte_pci_driver mlx5_vdpa_driver = {
302         .driver = {
303                 .name = "mlx5_vdpa",
304         },
305         .id_table = mlx5_vdpa_pci_id_map,
306         .probe = mlx5_vdpa_pci_probe,
307         .remove = mlx5_vdpa_pci_remove,
308         .drv_flags = 0,
309 };
310
311 /**
312  * Driver initialization routine.
313  */
314 RTE_INIT(rte_mlx5_vdpa_init)
315 {
316         /* Initialize common log type. */
317         mlx5_vdpa_logtype = rte_log_register("pmd.vdpa.mlx5");
318         if (mlx5_vdpa_logtype >= 0)
319                 rte_log_set_level(mlx5_vdpa_logtype, RTE_LOG_NOTICE);
320         if (mlx5_glue)
321                 rte_pci_register(&mlx5_vdpa_driver);
322 }
323
/* Export the PMD name, its PCI id table and kernel module dependencies. */
RTE_PMD_EXPORT_NAME(net_mlx5_vdpa, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5_vdpa, mlx5_vdpa_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5_vdpa, "* ib_uverbs & mlx5_core & mlx5_ib");