vdpa/mlx5: recreate a virtq becoming enabled
dpdk.git: drivers/vdpa/mlx5/mlx5_vdpa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 #include <unistd.h>
5
6 #include <rte_malloc.h>
7 #include <rte_log.h>
8 #include <rte_errno.h>
9 #include <rte_bus_pci.h>
10 #include <rte_pci.h>
11
12 #include <mlx5_glue.h>
13 #include <mlx5_common.h>
14 #include <mlx5_devx_cmds.h>
15 #include <mlx5_prm.h>
16 #include <mlx5_nl.h>
17
18 #include "mlx5_vdpa_utils.h"
19 #include "mlx5_vdpa.h"
20
21
22 #define MLX5_VDPA_DEFAULT_FEATURES ((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
23                             (1ULL << VIRTIO_F_ANY_LAYOUT) | \
24                             (1ULL << VIRTIO_NET_F_MQ) | \
25                             (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
26                             (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
27                             (1ULL << VHOST_F_LOG_ALL))
28
29 #define MLX5_VDPA_PROTOCOL_FEATURES \
30                             ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
31                              (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
32                              (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
33                              (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \
34                              (1ULL << VHOST_USER_PROTOCOL_F_MQ))
35
36 TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
37                                               TAILQ_HEAD_INITIALIZER(priv_list);
38 static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
39 int mlx5_vdpa_logtype;
40
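/* Find the device private structure matching a given vDPA device ID in the global list. */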
41 static struct mlx5_vdpa_priv *
42 mlx5_vdpa_find_priv_resource_by_did(int did)
43 {
44         struct mlx5_vdpa_priv *priv;
45         int found = 0;
46
47         pthread_mutex_lock(&priv_list_lock);
48         TAILQ_FOREACH(priv, &priv_list, next) {
49                 if (did == priv->id) {
50                         found = 1;
51                         break;
52                 }
53         }
54         pthread_mutex_unlock(&priv_list_lock);
55         if (!found) {
56                 DRV_LOG(ERR, "Invalid device id: %d.", did);
57                 rte_errno = EINVAL;
58                 return NULL;
59         }
60         return priv;
61 }
62
63 static int
64 mlx5_vdpa_get_queue_num(int did, uint32_t *queue_num)
65 {
66         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
67
68         if (priv == NULL) {
69                 DRV_LOG(ERR, "Invalid device id: %d.", did);
70                 return -1;
71         }
72         *queue_num = priv->caps.max_num_virtio_queues;
73         return 0;
74 }
75
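/* Build the supported virtio feature bits from the default set and the HCA capabilities. */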
76 static int
77 mlx5_vdpa_get_vdpa_features(int did, uint64_t *features)
78 {
79         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
80
81         if (priv == NULL) {
82                 DRV_LOG(ERR, "Invalid device id: %d.", did);
83                 return -1;
84         }
85         *features = MLX5_VDPA_DEFAULT_FEATURES;
86         if (priv->caps.virtio_queue_type & (1 << MLX5_VIRTQ_TYPE_PACKED))
87                 *features |= (1ULL << VIRTIO_F_RING_PACKED);
88         if (priv->caps.tso_ipv4)
89                 *features |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
90         if (priv->caps.tso_ipv6)
91                 *features |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
92         if (priv->caps.tx_csum)
93                 *features |= (1ULL << VIRTIO_NET_F_CSUM);
94         if (priv->caps.rx_csum)
95                 *features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
96         if (priv->caps.virtio_version_1_0)
97                 *features |= (1ULL << VIRTIO_F_VERSION_1);
98         return 0;
99 }
100
101 static int
102 mlx5_vdpa_get_protocol_features(int did, uint64_t *features)
103 {
104         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
105
106         if (priv == NULL) {
107                 DRV_LOG(ERR, "Invalid device id: %d.", did);
108                 return -1;
109         }
110         *features = MLX5_VDPA_PROTOCOL_FEATURES;
111         return 0;
112 }
113
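/* vDPA callback invoked when a virtqueue is enabled or disabled by the vhost layer. */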
114 static int
115 mlx5_vdpa_set_vring_state(int vid, int vring, int state)
116 {
117         int did = rte_vhost_get_vdpa_device_id(vid);
118         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
119
120         if (priv == NULL) {
121                 DRV_LOG(ERR, "Invalid device id: %d.", did);
122                 return -EINVAL;
123         }
124         if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
125                 DRV_LOG(ERR, "Too big vring id: %d.", vring);
126                 return -E2BIG;
127         }
128         return mlx5_vdpa_virtq_enable(priv, vring, state);
129 }
130
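/* (Re)configure the vhost host notifier so guest doorbells go directly to the HW; a setup failure is not fatal. */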
131 static int
132 mlx5_vdpa_direct_db_prepare(struct mlx5_vdpa_priv *priv)
133 {
134         int ret;
135
136         if (priv->direct_notifier) {
137                 ret = rte_vhost_host_notifier_ctrl(priv->vid, false);
138                 if (ret != 0) {
139                         DRV_LOG(INFO, "Direct HW notifier FD cannot be "
140                                 "destroyed for device %d: %d.", priv->vid, ret);
141                         return -1;
142                 }
143                 priv->direct_notifier = 0;
144         }
145         ret = rte_vhost_host_notifier_ctrl(priv->vid, true);
146         if (ret != 0)
147                 DRV_LOG(INFO, "Direct HW notifier FD cannot be configured for"
148                         " device %d: %d.", priv->vid, ret);
149         else
150                 priv->direct_notifier = 1;
151         return 0;
152 }
153
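/* vDPA set_features callback: enable dirty-page logging when live migration requires it. */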
154 static int
155 mlx5_vdpa_features_set(int vid)
156 {
157         int did = rte_vhost_get_vdpa_device_id(vid);
158         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
159         uint64_t log_base, log_size;
160         uint64_t features;
161         int ret;
162
163         if (priv == NULL) {
164                 DRV_LOG(ERR, "Invalid device id: %d.", did);
165                 return -EINVAL;
166         }
167         ret = rte_vhost_get_negotiated_features(vid, &features);
168         if (ret) {
169                 DRV_LOG(ERR, "Failed to get negotiated features.");
170                 return ret;
171         }
172         if (RTE_VHOST_NEED_LOG(features)) {
173                 ret = rte_vhost_get_log_base(vid, &log_base, &log_size);
174                 if (ret) {
175                         DRV_LOG(ERR, "Failed to get log base.");
176                         return ret;
177                 }
178                 ret = mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size);
179                 if (ret) {
180                         DRV_LOG(ERR, "Failed to set dirty bitmap.");
181                         return ret;
182                 }
183                 DRV_LOG(INFO, "mlx5 vdpa: enabling dirty logging...");
184                 ret = mlx5_vdpa_logging_enable(priv, 1);
185                 if (ret) {
186                 DRV_LOG(ERR, "Failed to enable dirty logging.");
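THIS SHOULD CAUSE A FAILURE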
187                         return ret;
188                 }
189         }
190         return 0;
191 }
192
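/* vDPA dev_close callback: finalize live-migration logging if configured and release all device resources. */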
193 static int
194 mlx5_vdpa_dev_close(int vid)
195 {
196         int did = rte_vhost_get_vdpa_device_id(vid);
197         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
198         int ret = 0;
199
200         if (priv == NULL) {
201                 DRV_LOG(ERR, "Invalid device id: %d.", did);
202                 return -1;
203         }
204         if (priv->configured)
205                 ret |= mlx5_vdpa_lm_log(priv);
206         mlx5_vdpa_cqe_event_unset(priv);
207         mlx5_vdpa_steer_unset(priv);
208         mlx5_vdpa_virtqs_release(priv);
209         mlx5_vdpa_event_qp_global_release(priv);
210         mlx5_vdpa_mem_dereg(priv);
211         priv->configured = 0;
212         priv->vid = 0;
213         DRV_LOG(INFO, "vDPA device %d was closed.", vid);
214         return ret;
215 }
216
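/* vDPA dev_conf callback: register guest memory and create virtqueues, steering and event resources. */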
217 static int
218 mlx5_vdpa_dev_config(int vid)
219 {
220         int did = rte_vhost_get_vdpa_device_id(vid);
221         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
222
223         if (priv == NULL) {
224                 DRV_LOG(ERR, "Invalid device id: %d.", did);
225                 return -EINVAL;
226         }
227         if (priv->configured && mlx5_vdpa_dev_close(vid)) {
228                 DRV_LOG(ERR, "Failed to reconfigure vid %d.", vid);
229                 return -1;
230         }
231         priv->vid = vid;
232         if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_direct_db_prepare(priv) ||
233             mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
234             mlx5_vdpa_cqe_event_setup(priv)) {
235                 mlx5_vdpa_dev_close(vid);
236                 return -1;
237         }
238         priv->configured = 1;
239         DRV_LOG(INFO, "vDPA device %d was configured.", vid);
240         return 0;
241 }
242
243 static int
244 mlx5_vdpa_get_device_fd(int vid)
245 {
246         int did = rte_vhost_get_vdpa_device_id(vid);
247         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
248
249         if (priv == NULL) {
250                 DRV_LOG(ERR, "Invalid device id: %d.", did);
251                 return -EINVAL;
252         }
253         return priv->ctx->cmd_fd;
254 }
255
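/* Return the VAR mmap offset and length used by vhost to map the doorbell (notify) area. */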
256 static int
257 mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
258 {
259         int did = rte_vhost_get_vdpa_device_id(vid);
260         struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
261
262         RTE_SET_USED(qid);
263         if (priv == NULL) {
264                 DRV_LOG(ERR, "Invalid device id: %d.", did);
265                 return -EINVAL;
266         }
267         if (!priv->var) {
268                 DRV_LOG(ERR, "VAR was not created for device %d, is the device"
269                         " configured?", did);
270                 return -EINVAL;
271         }
272         *offset = priv->var->mmap_off;
273         *size = priv->var->length;
274         return 0;
275 }
276
277 static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
278         .get_queue_num = mlx5_vdpa_get_queue_num,
279         .get_features = mlx5_vdpa_get_vdpa_features,
280         .get_protocol_features = mlx5_vdpa_get_protocol_features,
281         .dev_conf = mlx5_vdpa_dev_config,
282         .dev_close = mlx5_vdpa_dev_close,
283         .set_vring_state = mlx5_vdpa_set_vring_state,
284         .set_features = mlx5_vdpa_features_set,
285         .migration_done = NULL,
286         .get_vfio_group_fd = NULL,
287         .get_vfio_device_fd = mlx5_vdpa_get_device_fd,
288         .get_notify_area = mlx5_vdpa_get_notify_area,
289 };
290
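/* Find the IB (verbs) device whose PCI address matches the given one. */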
291 static struct ibv_device *
292 mlx5_vdpa_get_ib_device_match(struct rte_pci_addr *addr)
293 {
294         int n;
295         struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
296         struct ibv_device *ibv_match = NULL;
297
298         if (!ibv_list) {
299                 rte_errno = ENOSYS;
300                 return NULL;
301         }
302         while (n-- > 0) {
303                 struct rte_pci_addr pci_addr;
304
305                 DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
306                 if (mlx5_dev_to_pci_addr(ibv_list[n]->ibdev_path, &pci_addr))
307                         continue;
308                 if (memcmp(addr, &pci_addr, sizeof(pci_addr)))
309                         continue;
310                 ibv_match = ibv_list[n];
311                 break;
312         }
313         if (!ibv_match)
314                 rte_errno = ENOENT;
315         mlx5_glue->free_device_list(ibv_list);
316         return ibv_match;
317 }
318
319 /* Try to disable ROCE by Netlink/Devlink. */
320 static int
321 mlx5_vdpa_nl_roce_disable(const char *addr)
322 {
323         int nlsk_fd = mlx5_nl_init(NETLINK_GENERIC);
324         int devlink_id;
325         int enable;
326         int ret;
327
328         if (nlsk_fd < 0)
329                 return nlsk_fd;
330         devlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);
331         if (devlink_id < 0) {
332                 ret = devlink_id;
333                 DRV_LOG(DEBUG, "Failed to get devlink id for ROCE operations by"
334                         " Netlink.");
335                 goto close;
336         }
337         ret = mlx5_nl_enable_roce_get(nlsk_fd, devlink_id, addr, &enable);
338         if (ret) {
339                 DRV_LOG(DEBUG, "Failed to get ROCE enable by Netlink: %d.",
340                         ret);
341                 goto close;
342         } else if (!enable) {
343                 DRV_LOG(INFO, "ROCE is already disabled (Netlink).");
344                 goto close;
345         }
346         ret = mlx5_nl_enable_roce_set(nlsk_fd, devlink_id, addr, 0);
347         if (ret)
348                 DRV_LOG(DEBUG, "Failed to disable ROCE by Netlink: %d.", ret);
349         else
350                 DRV_LOG(INFO, "ROCE was successfully disabled by Netlink.");
351 close:
352         close(nlsk_fd);
353         return ret;
354 }
355
356 /* Try to disable ROCE by sysfs. */
357 static int
358 mlx5_vdpa_sys_roce_disable(const char *addr)
359 {
360         FILE *file_o;
361         int enable;
362         int ret;
363
364         MKSTR(file_p, "/sys/bus/pci/devices/%s/roce_enable", addr);
365         file_o = fopen(file_p, "rb");
366         if (!file_o) {
367                 rte_errno = ENOTSUP;
368                 return -ENOTSUP;
369         }
370         ret = fscanf(file_o, "%d", &enable);
371         if (ret != 1) {
372                 rte_errno = EINVAL;
373                 ret = EINVAL;
374                 goto close;
375         } else if (!enable) {
376                 ret = 0;
377                 DRV_LOG(INFO, "ROCE is already disabled (sysfs).");
378                 goto close;
379         }
380         fclose(file_o);
381         file_o = fopen(file_p, "wb");
382         if (!file_o) {
383                 rte_errno = ENOTSUP;
384                 return -ENOTSUP;
385         }
386         fprintf(file_o, "0\n");
387         ret = 0;
388 close:
389         if (ret)
390                 DRV_LOG(DEBUG, "Failed to disable ROCE by sysfs: %d.", ret);
391         else
392                 DRV_LOG(INFO, "ROCE was successfully disabled by sysfs.");
393         fclose(file_o);
394         return ret;
395 }
396
397 #define MLX5_VDPA_MAX_RETRIES 20
398 #define MLX5_VDPA_USEC 1000
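/* Disable ROCE (Netlink first, then sysfs) and wait for the IB device to reappear after the reload. */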
399 static int
400 mlx5_vdpa_roce_disable(struct rte_pci_addr *addr, struct ibv_device **ibv)
401 {
402         char addr_name[64] = {0};
403
404         rte_pci_device_name(addr, addr_name, sizeof(addr_name));
405         /* First try to disable ROCE by Netlink, then fall back to sysfs. */
406         if (mlx5_vdpa_nl_roce_disable(addr_name) == 0 ||
407             mlx5_vdpa_sys_roce_disable(addr_name) == 0) {
408                 /*
409                  * ROCE was disabled successfully; wait for the IB device to
410                  * appear again after the reload.
411                  */
412                 int r;
413                 struct ibv_device *ibv_new;
414
415                 for (r = MLX5_VDPA_MAX_RETRIES; r; r--) {
416                         ibv_new = mlx5_vdpa_get_ib_device_match(addr);
417                         if (ibv_new) {
418                                 *ibv = ibv_new;
419                                 return 0;
420                         }
421                         usleep(MLX5_VDPA_USEC);
422                 }
423                 DRV_LOG(ERR, "Cannot match device %s after ROCE disable, "
424                         "retries exceed %d", addr_name, MLX5_VDPA_MAX_RETRIES);
425                 rte_errno = EAGAIN;
426         }
427         return -rte_errno;
428 }
429
430 /**
431  * DPDK callback to register a PCI device.
432  *
433  * This function spawns a vDPA device out of a given PCI device.
434  *
435  * @param[in] pci_drv
436  *   PCI driver structure (mlx5_vdpa_driver).
437  * @param[in] pci_dev
438  *   PCI device information.
439  *
440  * @return
441  *   0 on success, 1 to skip this driver, a negative errno value otherwise
442  *   and rte_errno is set.
443  */
444 static int
445 mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
446                     struct rte_pci_device *pci_dev __rte_unused)
447 {
448         struct ibv_device *ibv;
449         struct mlx5_vdpa_priv *priv = NULL;
450         struct ibv_context *ctx = NULL;
451         struct mlx5_hca_attr attr;
452         int ret;
453
454         if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_VDPA) {
455                 DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5"
456                         " driver.");
457                 return 1;
458         }
459         ibv = mlx5_vdpa_get_ib_device_match(&pci_dev->addr);
460         if (!ibv) {
461                 DRV_LOG(ERR, "No matching IB device for PCI slot "
462                         PCI_PRI_FMT ".", pci_dev->addr.domain,
463                         pci_dev->addr.bus, pci_dev->addr.devid,
464                         pci_dev->addr.function);
465                 return -rte_errno;
466         } else {
467                 DRV_LOG(INFO, "PCI information matches for device \"%s\".",
468                         ibv->name);
469         }
470         if (mlx5_vdpa_roce_disable(&pci_dev->addr, &ibv) != 0) {
471                 DRV_LOG(WARNING, "Failed to disable ROCE for \"%s\".",
472                         ibv->name);
473                 return -rte_errno;
474         }
475         ctx = mlx5_glue->dv_open_device(ibv);
476         if (!ctx) {
477                 DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
478                 rte_errno = ENODEV;
479                 return -rte_errno;
480         }
481         ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
482         if (ret) {
483                 DRV_LOG(ERR, "Unable to read HCA capabilities.");
484                 rte_errno = ENOTSUP;
485                 goto error;
486         } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
487                 DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
488                         "old FW/OFED version?");
489                 rte_errno = ENOTSUP;
490                 goto error;
491         }
492         priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
493                            sizeof(struct mlx5_vdpa_virtq) *
494                            attr.vdpa.max_num_virtio_queues * 2,
495                            RTE_CACHE_LINE_SIZE);
496         if (!priv) {
497                 DRV_LOG(ERR, "Failed to allocate private memory.");
498                 rte_errno = ENOMEM;
499                 goto error;
500         }
501         priv->caps = attr.vdpa;
502         priv->log_max_rqt_size = attr.log_max_rqt_size;
503         priv->ctx = ctx;
504         priv->dev_addr.pci_addr = pci_dev->addr;
505         priv->dev_addr.type = VDPA_ADDR_PCI;
506         priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
507         if (!priv->var) {
508                 DRV_LOG(ERR, "Failed to allocate VAR %u.", errno);
509                 goto error;
510         }
511         priv->id = rte_vdpa_register_device(&priv->dev_addr, &mlx5_vdpa_ops);
512         if (priv->id < 0) {
513                 DRV_LOG(ERR, "Failed to register vDPA device.");
514                 rte_errno = rte_errno ? rte_errno : EINVAL;
515                 goto error;
516         }
517         SLIST_INIT(&priv->mr_list);
518         pthread_mutex_lock(&priv_list_lock);
519         TAILQ_INSERT_TAIL(&priv_list, priv, next);
520         pthread_mutex_unlock(&priv_list_lock);
521         return 0;
522
523 error:
524         if (priv) {
525                 if (priv->var)
526                         mlx5_glue->dv_free_var(priv->var);
527                 rte_free(priv);
528         }
529         if (ctx)
530                 mlx5_glue->close_device(ctx);
531         return -rte_errno;
532 }
533
534 /**
535  * DPDK callback to remove a PCI device.
536  *
537  * This function removes all vDPA devices belonging to a given PCI device.
538  *
539  * @param[in] pci_dev
540  *   Pointer to the PCI device.
541  *
542  * @return
543  *   0 on success, the function cannot fail.
544  */
545 static int
546 mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
547 {
548         struct mlx5_vdpa_priv *priv = NULL;
549         int found = 0;
550
551         pthread_mutex_lock(&priv_list_lock);
552         TAILQ_FOREACH(priv, &priv_list, next) {
553                 if (memcmp(&priv->dev_addr.pci_addr, &pci_dev->addr,
554                            sizeof(pci_dev->addr)) == 0) {
555                         found = 1;
556                         break;
557                 }
558         }
559         if (found)
560                 TAILQ_REMOVE(&priv_list, priv, next);
561         pthread_mutex_unlock(&priv_list_lock);
562         if (found) {
563                 if (priv->configured)
564                         mlx5_vdpa_dev_close(priv->vid);
565                 if (priv->var) {
566                         mlx5_glue->dv_free_var(priv->var);
567                         priv->var = NULL;
568                 }
569                 mlx5_glue->close_device(priv->ctx);
570                 rte_free(priv);
571         }
572         return 0;
573 }
574
575 static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
576         {
577                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
578                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6)
579         },
580         {
581                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
582                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
583         },
584         {
585                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
586                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
587         },
588         {
589                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
590                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
591         },
592         {
593                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
594                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
595         },
596         {
597                 .vendor_id = 0
598         }
599 };
600
601 static struct rte_pci_driver mlx5_vdpa_driver = {
602         .driver = {
603                 .name = "mlx5_vdpa",
604         },
605         .id_table = mlx5_vdpa_pci_id_map,
606         .probe = mlx5_vdpa_pci_probe,
607         .remove = mlx5_vdpa_pci_remove,
608         .drv_flags = 0,
609 };
610
611 /**
612  * Driver initialization routine.
613  */
614 RTE_INIT(rte_mlx5_vdpa_init)
615 {
616         /* Initialize common log type. */
617         mlx5_vdpa_logtype = rte_log_register("pmd.vdpa.mlx5");
618         if (mlx5_vdpa_logtype >= 0)
619                 rte_log_set_level(mlx5_vdpa_logtype, RTE_LOG_NOTICE);
620         if (mlx5_glue)
621                 rte_pci_register(&mlx5_vdpa_driver);
622 }
623
624 RTE_PMD_EXPORT_NAME(net_mlx5_vdpa, __COUNTER__);
625 RTE_PMD_REGISTER_PCI_TABLE(net_mlx5_vdpa, mlx5_vdpa_pci_id_map);
626 RTE_PMD_REGISTER_KMOD_DEP(net_mlx5_vdpa, "* ib_uverbs & mlx5_core & mlx5_ib");