vdpa/mlx5: support timestamp format
drivers/vdpa/mlx5/mlx5_vdpa.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <netinet/in.h>

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_pci.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_nl.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

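/*
 * Virtio features always offered to vhost and the supported vhost-user
 * protocol features. Capability-dependent virtio bits (packed ring, TSO,
 * checksum offloads, virtio 1.0) are added on top in
 * mlx5_vdpa_get_vdpa_features().
 */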
#define MLX5_VDPA_DEFAULT_FEATURES ((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
                            (1ULL << VIRTIO_F_ANY_LAYOUT) | \
                            (1ULL << VIRTIO_NET_F_MQ) | \
                            (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
                            (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
                            (1ULL << VHOST_F_LOG_ALL) | \
                            (1ULL << VIRTIO_NET_F_MTU))

#define MLX5_VDPA_PROTOCOL_FEATURES \
                            ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_STATUS))

#define MLX5_VDPA_MAX_RETRIES 20
#define MLX5_VDPA_USEC 1000
#define MLX5_VDPA_DEFAULT_NO_TRAFFIC_TIME_S 2LLU

TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
                                              TAILQ_HEAD_INITIALIZER(priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

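/*
 * Find the driver private structure matching a vDPA device, walking the
 * global list under priv_list_lock. Sets rte_errno to EINVAL and returns
 * NULL when the device is unknown.
 */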
static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
{
        struct mlx5_vdpa_priv *priv;
        int found = 0;

        pthread_mutex_lock(&priv_list_lock);
        TAILQ_FOREACH(priv, &priv_list, next) {
                if (vdev == priv->vdev) {
                        found = 1;
                        break;
                }
        }
        pthread_mutex_unlock(&priv_list_lock);
        if (!found) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                rte_errno = EINVAL;
                return NULL;
        }
        return priv;
}

static int
mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
{
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -1;
        }
        *queue_num = priv->caps.max_num_virtio_queues;
        return 0;
}

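/*
 * Build the virtio feature set offered to vhost: the fixed defaults plus
 * packed ring, TSO, checksum and virtio 1.0 bits, each gated on the HCA
 * capabilities queried at probe time.
 */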
static int
mlx5_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features)
{
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -1;
        }
        *features = MLX5_VDPA_DEFAULT_FEATURES;
        if (priv->caps.virtio_queue_type & (1 << MLX5_VIRTQ_TYPE_PACKED))
                *features |= (1ULL << VIRTIO_F_RING_PACKED);
        if (priv->caps.tso_ipv4)
                *features |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
        if (priv->caps.tso_ipv6)
                *features |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
        if (priv->caps.tx_csum)
                *features |= (1ULL << VIRTIO_NET_F_CSUM);
        if (priv->caps.rx_csum)
                *features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
        if (priv->caps.virtio_version_1_0)
                *features |= (1ULL << VIRTIO_F_VERSION_1);
        return 0;
}

static int
mlx5_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
                uint64_t *features)
{
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -1;
        }
        *features = MLX5_VDPA_PROTOCOL_FEATURES;
        return 0;
}

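/*
 * Enable or disable a single vring under the per-device configuration
 * lock. Vrings come in RX/TX pairs, hence the 2 * max_num_virtio_queues
 * bound on the vring index.
 */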
static int
mlx5_vdpa_set_vring_state(int vid, int vring, int state)
{
        struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);
        int ret;

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -EINVAL;
        }
        if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
                DRV_LOG(ERR, "Too big vring id: %d.", vring);
                return -E2BIG;
        }
        pthread_mutex_lock(&priv->vq_config_lock);
        ret = mlx5_vdpa_virtq_enable(priv, vring, state);
        pthread_mutex_unlock(&priv->vq_config_lock);
        return ret;
}

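/*
 * Apply the vhost negotiated features. When VHOST_F_LOG_ALL requests
 * dirty page logging for live migration, point the device at the vhost
 * log region and enable hardware dirty-bitmap tracking.
 */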
static int
mlx5_vdpa_features_set(int vid)
{
        struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);
        uint64_t log_base, log_size;
        uint64_t features;
        int ret;

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -EINVAL;
        }
        ret = rte_vhost_get_negotiated_features(vid, &features);
        if (ret) {
                DRV_LOG(ERR, "Failed to get negotiated features.");
                return ret;
        }
        if (RTE_VHOST_NEED_LOG(features)) {
                ret = rte_vhost_get_log_base(vid, &log_base, &log_size);
                if (ret) {
                        DRV_LOG(ERR, "Failed to get log base.");
                        return ret;
                }
                ret = mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size);
                if (ret) {
                        DRV_LOG(ERR, "Failed to set dirty bitmap.");
                        return ret;
                }
                DRV_LOG(INFO, "mlx5 vdpa: enabling dirty logging...");
                ret = mlx5_vdpa_logging_enable(priv, 1);
                if (ret) {
                        DRV_LOG(ERR, "Failed to enable dirty logging.");
                        return ret;
                }
        }
        return 0;
}

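/*
 * Allocate a protection domain and retrieve its number (PDN) via Direct
 * Verbs. Without DV support there is no way to obtain the PDN, so the
 * function fails with -ENOTSUP.
 */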
static int
mlx5_vdpa_pd_create(struct mlx5_vdpa_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        priv->pd = mlx5_glue->alloc_pd(priv->ctx);
        if (priv->pd == NULL) {
                DRV_LOG(ERR, "Failed to allocate PD.");
                return errno ? -errno : -ENOMEM;
        }
        struct mlx5dv_obj obj;
        struct mlx5dv_pd pd_info;
        int ret = 0;

        obj.pd.in = priv->pd;
        obj.pd.out = &pd_info;
        ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
        if (ret) {
                DRV_LOG(ERR, "Failed to get PD object info.");
                mlx5_glue->dealloc_pd(priv->pd);
                priv->pd = NULL;
                return -errno;
        }
        priv->pdn = pd_info.pdn;
        return 0;
#else
        (void)priv;
        DRV_LOG(ERR, "Cannot get pdn - no DV support.");
        return -ENOTSUP;
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
}

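/*
 * Align the kernel interface MTU with the MTU negotiated by vhost. The
 * get/set ioctl sequence is retried up to MLX5_VDPA_MAX_RETRIES times,
 * sleeping MLX5_VDPA_USEC microseconds between attempts, until the kernel
 * reports the requested value.
 */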
static int
mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)
{
        struct ifreq request;
        uint16_t vhost_mtu = 0;
        uint16_t kern_mtu = 0;
        int ret = rte_vhost_get_mtu(priv->vid, &vhost_mtu);
        int sock;
        int retries = MLX5_VDPA_MAX_RETRIES;

        if (ret) {
                DRV_LOG(DEBUG, "Cannot get vhost MTU - %d.", ret);
                return ret;
        }
        if (!vhost_mtu) {
                DRV_LOG(DEBUG, "Vhost MTU is 0.");
                return ret;
        }
        ret = mlx5_get_ifname_sysfs(priv->ctx->device->ibdev_path,
                                    request.ifr_name);
        if (ret) {
                DRV_LOG(DEBUG, "Cannot get kernel IF name - %d.", ret);
                return ret;
        }
        sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
        if (sock == -1) {
                DRV_LOG(DEBUG, "Cannot open IF socket.");
                return sock;
        }
        while (retries--) {
                ret = ioctl(sock, SIOCGIFMTU, &request);
                if (ret == -1)
                        break;
                kern_mtu = request.ifr_mtu;
                DRV_LOG(DEBUG, "MTU: current %d requested %d.", (int)kern_mtu,
                        (int)vhost_mtu);
                if (kern_mtu == vhost_mtu)
                        break;
                request.ifr_mtu = vhost_mtu;
                ret = ioctl(sock, SIOCSIFMTU, &request);
                if (ret == -1)
                        break;
                request.ifr_mtu = 0;
                usleep(MLX5_VDPA_USEC);
        }
        close(sock);
        return kern_mtu == vhost_mtu ? 0 : -1;
}

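/*
 * Release a device configuration: flush the live-migration log if needed,
 * then tear down events, steering, virtqueues, memory registrations and
 * the PD, roughly in reverse order of mlx5_vdpa_dev_config().
 */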
static int
mlx5_vdpa_dev_close(int vid)
{
        struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);
        int ret = 0;

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -1;
        }
        if (priv->configured)
                ret |= mlx5_vdpa_lm_log(priv);
        mlx5_vdpa_err_event_unset(priv);
        mlx5_vdpa_cqe_event_unset(priv);
        mlx5_vdpa_steer_unset(priv);
        mlx5_vdpa_virtqs_release(priv);
        mlx5_vdpa_event_qp_global_release(priv);
        mlx5_vdpa_mem_dereg(priv);
        if (priv->pd) {
                claim_zero(mlx5_glue->dealloc_pd(priv->pd));
                priv->pd = NULL;
        }
        priv->configured = 0;
        priv->vid = 0;
        /*
         * The mutex may stay locked after the event thread is canceled,
         * so re-initialize it.
         */
        pthread_mutex_init(&priv->vq_config_lock, NULL);
        DRV_LOG(INFO, "vDPA device %d was closed.", vid);
        return ret;
}

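/*
 * Configure the device for a vhost connection identified by vid. A device
 * that is already configured is closed first. Any setup failure unwinds
 * through mlx5_vdpa_dev_close(); an MTU mismatch is only a warning.
 */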
static int
mlx5_vdpa_dev_config(int vid)
{
        struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -EINVAL;
        }
        if (priv->configured && mlx5_vdpa_dev_close(vid)) {
                DRV_LOG(ERR, "Failed to reconfigure vid %d.", vid);
                return -1;
        }
        priv->vid = vid;
        if (mlx5_vdpa_mtu_set(priv))
                DRV_LOG(WARNING, "MTU cannot be set on device %s.",
                                vdev->device->name);
        if (mlx5_vdpa_pd_create(priv) || mlx5_vdpa_mem_register(priv) ||
            mlx5_vdpa_err_event_setup(priv) ||
            mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
            mlx5_vdpa_cqe_event_setup(priv)) {
                mlx5_vdpa_dev_close(vid);
                return -1;
        }
        priv->configured = 1;
        DRV_LOG(INFO, "vDPA device %d was configured.", vid);
        return 0;
}

static int
mlx5_vdpa_get_device_fd(int vid)
{
        struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -EINVAL;
        }
        return priv->ctx->cmd_fd;
}

static int
mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
        struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);

        RTE_SET_USED(qid);
        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -EINVAL;
        }
        if (!priv->var) {
                DRV_LOG(ERR, "VAR was not created for device %s, is the device"
                        " configured?", vdev->device->name);
                return -EINVAL;
        }
        *offset = priv->var->mmap_off;
        *size = priv->var->length;
        return 0;
}

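/*
 * Report the names of the per-virtq hardware counters. Per the convention
 * implemented here, a NULL stats_names pointer asks only for the number
 * of supported counters.
 */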
static int
mlx5_vdpa_get_stats_names(struct rte_vdpa_device *vdev,
                struct rte_vdpa_stat_name *stats_names,
                unsigned int size)
{
        static const char *mlx5_vdpa_stats_names[MLX5_VDPA_STATS_MAX] = {
                "received_descriptors",
                "completed_descriptors",
                "bad descriptor errors",
                "exceed max chain",
                "invalid buffer",
                "completion errors",
        };
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);
        unsigned int i;

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
                return -ENODEV;
        }
        if (!stats_names)
                return MLX5_VDPA_STATS_MAX;
        size = RTE_MIN(size, (unsigned int)MLX5_VDPA_STATS_MAX);
        for (i = 0; i < size; ++i)
                strlcpy(stats_names[i].name, mlx5_vdpa_stats_names[i],
                        RTE_VDPA_STATS_NAME_SIZE);
        return size;
}

static int
mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
                struct rte_vdpa_stat *stats, unsigned int n)
{
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
                return -ENODEV;
        }
        if (!priv->configured) {
                DRV_LOG(ERR, "Device %s was not configured.",
                                vdev->device->name);
                return -ENODATA;
        }
        if (qid >= (int)priv->nr_virtqs) {
                DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
                                vdev->device->name);
                return -E2BIG;
        }
        if (!priv->caps.queue_counters_valid) {
                DRV_LOG(ERR, "Virtq statistics are not supported for device %s.",
                        vdev->device->name);
                return -ENOTSUP;
        }
        return mlx5_vdpa_virtq_stats_get(priv, qid, stats, n);
}

static int
mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
{
        struct mlx5_vdpa_priv *priv =
                mlx5_vdpa_find_priv_resource_by_vdev(vdev);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
                return -ENODEV;
        }
        if (!priv->configured) {
                DRV_LOG(ERR, "Device %s was not configured.",
                                vdev->device->name);
                return -ENODATA;
        }
        if (qid >= (int)priv->nr_virtqs) {
                DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
                                vdev->device->name);
                return -E2BIG;
        }
        if (!priv->caps.queue_counters_valid) {
                DRV_LOG(ERR, "Virtq statistics are not supported for device %s.",
                        vdev->device->name);
                return -ENOTSUP;
        }
        return mlx5_vdpa_virtq_stats_reset(priv, qid);
}

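/*
 * vDPA ops registered for each probed device in mlx5_vdpa_pci_probe().
 * A minimal application-side usage sketch, assuming the generic
 * rte_vdpa/rte_vhost APIs (device name and socket path illustrative):
 *
 *     struct rte_vdpa_device *dev =
 *             rte_vdpa_find_device_by_name("0000:01:00.2");
 *     rte_vhost_driver_register("/tmp/vhost.sock", RTE_VHOST_USER_CLIENT);
 *     rte_vhost_driver_attach_vdpa_device("/tmp/vhost.sock", dev);
 *     rte_vhost_driver_start("/tmp/vhost.sock");
 */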
static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
        .get_queue_num = mlx5_vdpa_get_queue_num,
        .get_features = mlx5_vdpa_get_vdpa_features,
        .get_protocol_features = mlx5_vdpa_get_protocol_features,
        .dev_conf = mlx5_vdpa_dev_config,
        .dev_close = mlx5_vdpa_dev_close,
        .set_vring_state = mlx5_vdpa_set_vring_state,
        .set_features = mlx5_vdpa_features_set,
        .migration_done = NULL,
        .get_vfio_group_fd = NULL,
        .get_vfio_device_fd = mlx5_vdpa_get_device_fd,
        .get_notify_area = mlx5_vdpa_get_notify_area,
        .get_stats_names = mlx5_vdpa_get_stats_names,
        .get_stats = mlx5_vdpa_get_stats,
        .reset_stats = mlx5_vdpa_reset_stats,
};

static struct ibv_device *
mlx5_vdpa_get_ib_device_match(struct rte_pci_addr *addr)
{
        int n;
        struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
        struct ibv_device *ibv_match = NULL;

        if (!ibv_list) {
                rte_errno = ENOSYS;
                return NULL;
        }
        while (n-- > 0) {
                struct rte_pci_addr pci_addr;

                DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
                if (mlx5_dev_to_pci_addr(ibv_list[n]->ibdev_path, &pci_addr))
                        continue;
                if (rte_pci_addr_cmp(addr, &pci_addr))
                        continue;
                ibv_match = ibv_list[n];
                break;
        }
        if (!ibv_match)
                rte_errno = ENOENT;
        mlx5_glue->free_device_list(ibv_list);
        return ibv_match;
}

/* Try to disable ROCE by Netlink/Devlink. */
static int
mlx5_vdpa_nl_roce_disable(const char *addr)
{
        int nlsk_fd = mlx5_nl_init(NETLINK_GENERIC);
        int devlink_id;
        int enable;
        int ret;

        if (nlsk_fd < 0)
                return nlsk_fd;
        devlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);
        if (devlink_id < 0) {
                ret = devlink_id;
                DRV_LOG(DEBUG, "Failed to get devlink id for ROCE operations by"
                        " Netlink.");
                goto close;
        }
        ret = mlx5_nl_enable_roce_get(nlsk_fd, devlink_id, addr, &enable);
        if (ret) {
                DRV_LOG(DEBUG, "Failed to get ROCE enable by Netlink: %d.",
                        ret);
                goto close;
        } else if (!enable) {
                DRV_LOG(INFO, "ROCE has already been disabled (Netlink).");
                goto close;
        }
        ret = mlx5_nl_enable_roce_set(nlsk_fd, devlink_id, addr, 0);
        if (ret)
                DRV_LOG(DEBUG, "Failed to disable ROCE by Netlink: %d.", ret);
        else
                DRV_LOG(INFO, "ROCE is disabled by Netlink successfully.");
close:
        close(nlsk_fd);
        return ret;
}

/* Try to disable ROCE by sysfs. */
static int
mlx5_vdpa_sys_roce_disable(const char *addr)
{
        FILE *file_o;
        int enable;
        int ret;

        MKSTR(file_p, "/sys/bus/pci/devices/%s/roce_enable", addr);
        file_o = fopen(file_p, "rb");
        if (!file_o) {
                rte_errno = ENOTSUP;
                return -ENOTSUP;
        }
        ret = fscanf(file_o, "%d", &enable);
        if (ret != 1) {
                rte_errno = EINVAL;
                ret = EINVAL;
                goto close;
        } else if (!enable) {
                ret = 0;
                DRV_LOG(INFO, "ROCE has already been disabled (sysfs).");
                goto close;
        }
        fclose(file_o);
        file_o = fopen(file_p, "wb");
        if (!file_o) {
                rte_errno = ENOTSUP;
                return -ENOTSUP;
        }
        fprintf(file_o, "0\n");
        ret = 0;
close:
        if (ret)
                DRV_LOG(DEBUG, "Failed to disable ROCE by sysfs: %d.", ret);
        else
                DRV_LOG(INFO, "ROCE is disabled by sysfs successfully.");
        fclose(file_o);
        return ret;
}

static int
mlx5_vdpa_roce_disable(struct rte_pci_addr *addr, struct ibv_device **ibv)
{
        char addr_name[64] = {0};

        rte_pci_device_name(addr, addr_name, sizeof(addr_name));
        /* First try to disable ROCE by Netlink, then fall back to sysfs. */
        if (mlx5_vdpa_nl_roce_disable(addr_name) == 0 ||
            mlx5_vdpa_sys_roce_disable(addr_name) == 0) {
                /*
                 * ROCE was disabled successfully, wait for the IB device to
                 * appear again after the reload.
                 */
                int r;
                struct ibv_device *ibv_new;

                for (r = MLX5_VDPA_MAX_RETRIES; r; r--) {
                        ibv_new = mlx5_vdpa_get_ib_device_match(addr);
                        if (ibv_new) {
                                *ibv = ibv_new;
                                return 0;
                        }
                        usleep(MLX5_VDPA_USEC);
                }
                DRV_LOG(ERR, "Cannot match device %s after ROCE disable, "
                        "retries exceed %d", addr_name, MLX5_VDPA_MAX_RETRIES);
                rte_errno = EAGAIN;
        }
        return -rte_errno;
}

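/*
 * Parse a single "key=value" devargs pair into the device configuration.
 * Recognized keys are event_mode, event_us, no_traffic_time, event_core,
 * hw_latency_mode, hw_max_latency_us and hw_max_pending_comp; the "class"
 * key is consumed by the common mlx5 PCI layer. Unknown keys only raise a
 * warning. As an illustrative (not normative) example, a device could be
 * probed in interrupt-only event mode with:
 *
 *     dpdk-testpmd -a 0000:01:00.2,class=vdpa,event_mode=2
 */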
static int
mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)
{
        struct mlx5_vdpa_priv *priv = opaque;
        unsigned long tmp;
        int n_cores = sysconf(_SC_NPROCESSORS_ONLN);

        if (strcmp(key, "class") == 0)
                return 0;
        errno = 0;
        tmp = strtoul(val, NULL, 0);
        if (errno) {
                DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
                return -errno;
        }
        if (strcmp(key, "event_mode") == 0) {
                if (tmp <= MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT)
                        priv->event_mode = (int)tmp;
                else
                        DRV_LOG(WARNING, "Invalid event_mode %s.", val);
        } else if (strcmp(key, "event_us") == 0) {
                priv->event_us = (uint32_t)tmp;
        } else if (strcmp(key, "no_traffic_time") == 0) {
                priv->no_traffic_time_s = (uint32_t)tmp;
        } else if (strcmp(key, "event_core") == 0) {
                if (tmp >= (unsigned long)n_cores)
                        DRV_LOG(WARNING, "Invalid event_core %s.", val);
                else
                        priv->event_core = tmp;
        } else if (strcmp(key, "hw_latency_mode") == 0) {
                priv->hw_latency_mode = (uint32_t)tmp;
        } else if (strcmp(key, "hw_max_latency_us") == 0) {
                priv->hw_max_latency_us = (uint32_t)tmp;
        } else if (strcmp(key, "hw_max_pending_comp") == 0) {
                priv->hw_max_pending_comp = (uint32_t)tmp;
        } else {
                DRV_LOG(WARNING, "Invalid key %s.", key);
        }
        return 0;
}

static void
mlx5_vdpa_config_get(struct rte_devargs *devargs, struct mlx5_vdpa_priv *priv)
{
        struct rte_kvargs *kvlist;

        priv->event_mode = MLX5_VDPA_EVENT_MODE_FIXED_TIMER;
        priv->event_us = 0;
        priv->event_core = -1;
        priv->no_traffic_time_s = MLX5_VDPA_DEFAULT_NO_TRAFFIC_TIME_S;
        if (devargs == NULL)
                return;
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;
        rte_kvargs_process(kvlist, NULL, mlx5_vdpa_args_check_handler, priv);
        rte_kvargs_free(kvlist);
        if (!priv->event_us &&
            priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER)
                priv->event_us = MLX5_VDPA_DEFAULT_TIMER_STEP_US;
        DRV_LOG(DEBUG, "event mode is %d.", priv->event_mode);
        DRV_LOG(DEBUG, "event_us is %u us.", priv->event_us);
        DRV_LOG(DEBUG, "no traffic time is %u s.", priv->no_traffic_time_s);
}

/**
 * DPDK callback to register a mlx5 PCI device.
 *
 * This function spawns a vdpa device out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_vdpa_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, 1 to skip this driver, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                    struct rte_pci_device *pci_dev __rte_unused)
{
        struct ibv_device *ibv;
        struct mlx5_vdpa_priv *priv = NULL;
        struct ibv_context *ctx = NULL;
        struct mlx5_hca_attr attr;
        int ret;

        ibv = mlx5_vdpa_get_ib_device_match(&pci_dev->addr);
        if (!ibv) {
                DRV_LOG(ERR, "No matching IB device for PCI slot "
                        PCI_PRI_FMT ".", pci_dev->addr.domain,
                        pci_dev->addr.bus, pci_dev->addr.devid,
                        pci_dev->addr.function);
                return -rte_errno;
        } else {
                DRV_LOG(INFO, "PCI information matches for device \"%s\".",
                        ibv->name);
        }
        if (mlx5_vdpa_roce_disable(&pci_dev->addr, &ibv) != 0) {
                DRV_LOG(WARNING, "Failed to disable ROCE for \"%s\".",
                        ibv->name);
                return -rte_errno;
        }
        ctx = mlx5_glue->dv_open_device(ibv);
        if (!ctx) {
                DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
                rte_errno = ENODEV;
                return -rte_errno;
        }
        ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
        if (ret) {
                DRV_LOG(ERR, "Unable to read HCA capabilities.");
                rte_errno = ENOTSUP;
                goto error;
        } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
                DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
                        "old FW/OFED version?");
                rte_errno = ENOTSUP;
                goto error;
        }
        if (!attr.vdpa.queue_counters_valid)
                DRV_LOG(DEBUG, "No capability to support virtq statistics.");
        priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
                           sizeof(struct mlx5_vdpa_virtq) *
                           attr.vdpa.max_num_virtio_queues * 2,
                           RTE_CACHE_LINE_SIZE);
        if (!priv) {
                DRV_LOG(ERR, "Failed to allocate private memory.");
                rte_errno = ENOMEM;
                goto error;
        }
        priv->caps = attr.vdpa;
        priv->log_max_rqt_size = attr.log_max_rqt_size;
        priv->num_lag_ports = attr.num_lag_ports;
        priv->qp_ts_format = attr.qp_ts_format;
        if (attr.num_lag_ports == 0)
                priv->num_lag_ports = 1;
        priv->ctx = ctx;
        priv->pci_dev = pci_dev;
        priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
        if (!priv->var) {
                DRV_LOG(ERR, "Failed to allocate VAR %u.", errno);
                goto error;
        }
        priv->vdev = rte_vdpa_register_device(&pci_dev->device,
                        &mlx5_vdpa_ops);
        if (priv->vdev == NULL) {
                DRV_LOG(ERR, "Failed to register vDPA device.");
                rte_errno = rte_errno ? rte_errno : EINVAL;
                goto error;
        }
        mlx5_vdpa_config_get(pci_dev->device.devargs, priv);
        SLIST_INIT(&priv->mr_list);
        pthread_mutex_init(&priv->vq_config_lock, NULL);
        pthread_mutex_lock(&priv_list_lock);
        TAILQ_INSERT_TAIL(&priv_list, priv, next);
        pthread_mutex_unlock(&priv_list_lock);
        return 0;

error:
        if (priv) {
                if (priv->var)
                        mlx5_glue->dv_free_var(priv->var);
                rte_free(priv);
        }
        if (ctx)
                mlx5_glue->close_device(ctx);
        return -rte_errno;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all vDPA devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
{
        struct mlx5_vdpa_priv *priv = NULL;
        int found = 0;

        pthread_mutex_lock(&priv_list_lock);
        TAILQ_FOREACH(priv, &priv_list, next) {
                if (!rte_pci_addr_cmp(&priv->pci_dev->addr, &pci_dev->addr)) {
                        found = 1;
                        break;
                }
        }
        if (found)
                TAILQ_REMOVE(&priv_list, priv, next);
        pthread_mutex_unlock(&priv_list_lock);
        if (found) {
                if (priv->configured)
                        mlx5_vdpa_dev_close(priv->vid);
                if (priv->var) {
                        mlx5_glue->dv_free_var(priv->var);
                        priv->var = NULL;
                }
                mlx5_glue->close_device(priv->ctx);
                pthread_mutex_destroy(&priv->vq_config_lock);
                rte_free(priv);
        }
        return 0;
}

static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX7)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
        },
        {
                .vendor_id = 0
        }
};

static struct mlx5_pci_driver mlx5_vdpa_driver = {
        .driver_class = MLX5_CLASS_VDPA,
        .pci_driver = {
                .driver = {
                        .name = "mlx5_vdpa",
                },
                .id_table = mlx5_vdpa_pci_id_map,
                .probe = mlx5_vdpa_pci_probe,
                .remove = mlx5_vdpa_pci_remove,
                .drv_flags = 0,
        },
};

RTE_LOG_REGISTER(mlx5_vdpa_logtype, pmd.vdpa.mlx5, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_vdpa_init)
{
        mlx5_common_init();
        if (mlx5_glue)
                mlx5_pci_driver_register(&mlx5_vdpa_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5_vdpa, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5_vdpa, mlx5_vdpa_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5_vdpa, "* ib_uverbs & mlx5_core & mlx5_ib");