drivers/vdpa/mlx5/mlx5_vdpa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_bus_pci.h>
#include <rte_pci.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_nl.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_DEFAULT_FEATURES ((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
                            (1ULL << VIRTIO_F_ANY_LAYOUT) | \
                            (1ULL << VIRTIO_NET_F_MQ) | \
                            (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
                            (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
                            (1ULL << VHOST_F_LOG_ALL))

#define MLX5_VDPA_PROTOCOL_FEATURES \
                            ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \
                             (1ULL << VHOST_USER_PROTOCOL_F_MQ))

TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
                                              TAILQ_HEAD_INITIALIZER(priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
int mlx5_vdpa_logtype;

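/* Look up the driver private structure by vDPA device ID; sets rte_errno on miss. */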
static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_did(int did)
{
        struct mlx5_vdpa_priv *priv;
        int found = 0;

        pthread_mutex_lock(&priv_list_lock);
        TAILQ_FOREACH(priv, &priv_list, next) {
                if (did == priv->id) {
                        found = 1;
                        break;
                }
        }
        pthread_mutex_unlock(&priv_list_lock);
        if (!found) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                rte_errno = EINVAL;
                return NULL;
        }
        return priv;
}

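/*
 * vhost .get_queue_num callback: report the maximum number of virtio queues
 * supported by the HW, as read from the device capabilities.
 */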
static int
mlx5_vdpa_get_queue_num(int did, uint32_t *queue_num)
{
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -1;
        }
        *queue_num = priv->caps.max_num_virtio_queues;
        return 0;
}

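/*
 * vhost .get_features callback: start from the static default feature bits and
 * add the offloads reported by the HW capabilities (packed ring, TSO, checksum
 * offloads, virtio 1.0).
 */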
static int
mlx5_vdpa_get_vdpa_features(int did, uint64_t *features)
{
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -1;
        }
        *features = MLX5_VDPA_DEFAULT_FEATURES;
        if (priv->caps.virtio_queue_type & (1 << MLX5_VIRTQ_TYPE_PACKED))
                *features |= (1ULL << VIRTIO_F_RING_PACKED);
        if (priv->caps.tso_ipv4)
                *features |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
        if (priv->caps.tso_ipv6)
                *features |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
        if (priv->caps.tx_csum)
                *features |= (1ULL << VIRTIO_NET_F_CSUM);
        if (priv->caps.rx_csum)
                *features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
        if (priv->caps.virtio_version_1_0)
                *features |= (1ULL << VIRTIO_F_VERSION_1);
        return 0;
}

static int
mlx5_vdpa_get_protocol_features(int did, uint64_t *features)
{
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -1;
        }
        *features = MLX5_VDPA_PROTOCOL_FEATURES;
        return 0;
}

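/*
 * vhost .set_vring_state callback: enable or disable a single virtqueue after
 * checking that the device is configured and the ring index was prepared.
 */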
static int
mlx5_vdpa_set_vring_state(int vid, int vring, int state)
{
        int did = rte_vhost_get_vdpa_device_id(vid);
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -EINVAL;
        }
        if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
            (int)priv->caps.max_num_virtio_queues * 2) ||
            !priv->virtqs[vring].virtq) {
                DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
                return -EINVAL;
        }
        return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
}

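/*
 * (Re)create the host notifier mapping so that guest doorbells reach the HW
 * directly; a failure to configure it is logged but not treated as fatal.
 */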
static int
mlx5_vdpa_direct_db_prepare(struct mlx5_vdpa_priv *priv)
{
        int ret;

        if (priv->direct_notifier) {
                ret = rte_vhost_host_notifier_ctrl(priv->vid, false);
                if (ret != 0) {
                        DRV_LOG(INFO, "Direct HW notifier FD cannot be "
                                "destroyed for device %d: %d.", priv->vid, ret);
                        return -1;
                }
                priv->direct_notifier = 0;
        }
        ret = rte_vhost_host_notifier_ctrl(priv->vid, true);
        if (ret != 0)
                DRV_LOG(INFO, "Direct HW notifier FD cannot be configured for"
                        " device %d: %d.", priv->vid, ret);
        else
                priv->direct_notifier = 1;
        return 0;
}

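/*
 * vhost .set_features callback: when VHOST_F_LOG_ALL is negotiated, register
 * the dirty-page log area and enable HW dirty-bitmap logging for live migration.
 */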
static int
mlx5_vdpa_features_set(int vid)
{
        int did = rte_vhost_get_vdpa_device_id(vid);
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
        uint64_t log_base, log_size;
        uint64_t features;
        int ret;

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -EINVAL;
        }
        ret = rte_vhost_get_negotiated_features(vid, &features);
        if (ret) {
                DRV_LOG(ERR, "Failed to get negotiated features.");
                return ret;
        }
        if (RTE_VHOST_NEED_LOG(features)) {
                ret = rte_vhost_get_log_base(vid, &log_base, &log_size);
                if (ret) {
                        DRV_LOG(ERR, "Failed to get log base.");
                        return ret;
                }
                ret = mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size);
                if (ret) {
                        DRV_LOG(ERR, "Failed to set dirty bitmap.");
                        return ret;
                }
                DRV_LOG(INFO, "mlx5 vdpa: enabling dirty logging...");
                ret = mlx5_vdpa_logging_enable(priv, 1);
                if (ret) {
                        DRV_LOG(ERR, "Failed to enable dirty logging.");
                        return ret;
                }
        }
        return 0;
}

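/*
 * vhost .dev_close callback: log the final dirty state if the device was
 * configured, then release steering, virtqueues, event QPs and registered memory.
 */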
static int
mlx5_vdpa_dev_close(int vid)
{
        int did = rte_vhost_get_vdpa_device_id(vid);
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
        int ret = 0;

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -1;
        }
        if (priv->configured)
                ret |= mlx5_vdpa_lm_log(priv);
        mlx5_vdpa_cqe_event_unset(priv);
        ret |= mlx5_vdpa_steer_unset(priv);
        mlx5_vdpa_virtqs_release(priv);
        mlx5_vdpa_event_qp_global_release(priv);
        mlx5_vdpa_mem_dereg(priv);
        priv->configured = 0;
        priv->vid = 0;
        DRV_LOG(INFO, "vDPA device %d was closed.", vid);
        return ret;
}

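/*
 * vhost .dev_conf callback: full (re)configuration path - register guest
 * memory, prepare doorbells and virtqueues, then set up steering and CQE events.
 */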
static int
mlx5_vdpa_dev_config(int vid)
{
        int did = rte_vhost_get_vdpa_device_id(vid);
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -EINVAL;
        }
        if (priv->configured && mlx5_vdpa_dev_close(vid)) {
                DRV_LOG(ERR, "Failed to reconfigure vid %d.", vid);
                return -1;
        }
        priv->vid = vid;
        if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_direct_db_prepare(priv) ||
            mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
            mlx5_vdpa_cqe_event_setup(priv)) {
                mlx5_vdpa_dev_close(vid);
                return -1;
        }
        priv->configured = 1;
        DRV_LOG(INFO, "vDPA device %d was configured.", vid);
        return 0;
}

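/*
 * vhost .get_vfio_device_fd callback: no VFIO is involved here, the verbs
 * context command FD is returned so vhost can mmap the notify area on it.
 */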
static int
mlx5_vdpa_get_device_fd(int vid)
{
        int did = rte_vhost_get_vdpa_device_id(vid);
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -EINVAL;
        }
        return priv->ctx->cmd_fd;
}

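/*
 * vhost .get_notify_area callback: expose the device VAR mmap offset and
 * length; the same area serves all queues, so the queue ID is ignored.
 */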
static int
mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
        int did = rte_vhost_get_vdpa_device_id(vid);
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);

        RTE_SET_USED(qid);
        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -EINVAL;
        }
        if (!priv->var) {
                DRV_LOG(ERR, "VAR was not created for device %d, is the device"
                        " configured?", did);
                return -EINVAL;
        }
        *offset = priv->var->mmap_off;
        *size = priv->var->length;
        return 0;
}

static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
        .get_queue_num = mlx5_vdpa_get_queue_num,
        .get_features = mlx5_vdpa_get_vdpa_features,
        .get_protocol_features = mlx5_vdpa_get_protocol_features,
        .dev_conf = mlx5_vdpa_dev_config,
        .dev_close = mlx5_vdpa_dev_close,
        .set_vring_state = mlx5_vdpa_set_vring_state,
        .set_features = mlx5_vdpa_features_set,
        .migration_done = NULL,
        .get_vfio_group_fd = NULL,
        .get_vfio_device_fd = mlx5_vdpa_get_device_fd,
        .get_notify_area = mlx5_vdpa_get_notify_area,
};

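/* Scan the verbs device list for the device matching the given PCI address. */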
static struct ibv_device *
mlx5_vdpa_get_ib_device_match(struct rte_pci_addr *addr)
{
        int n;
        struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
        struct ibv_device *ibv_match = NULL;

        if (!ibv_list) {
                rte_errno = ENOSYS;
                return NULL;
        }
        while (n-- > 0) {
                struct rte_pci_addr pci_addr;

                DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
                if (mlx5_dev_to_pci_addr(ibv_list[n]->ibdev_path, &pci_addr))
                        continue;
                if (memcmp(addr, &pci_addr, sizeof(pci_addr)))
                        continue;
                ibv_match = ibv_list[n];
                break;
        }
        if (!ibv_match)
                rte_errno = ENOENT;
        mlx5_glue->free_device_list(ibv_list);
        return ibv_match;
}

/* Try to disable ROCE by Netlink/devlink. */
static int
mlx5_vdpa_nl_roce_disable(const char *addr)
{
        int nlsk_fd = mlx5_nl_init(NETLINK_GENERIC);
        int devlink_id;
        int enable;
        int ret;

        if (nlsk_fd < 0)
                return nlsk_fd;
        devlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);
        if (devlink_id < 0) {
                ret = devlink_id;
                DRV_LOG(DEBUG, "Failed to get devlink id for ROCE operations by"
                        " Netlink.");
                goto close;
        }
        ret = mlx5_nl_enable_roce_get(nlsk_fd, devlink_id, addr, &enable);
        if (ret) {
                DRV_LOG(DEBUG, "Failed to get ROCE enable by Netlink: %d.",
                        ret);
                goto close;
        } else if (!enable) {
                DRV_LOG(INFO, "ROCE is already disabled (Netlink).");
                goto close;
        }
        ret = mlx5_nl_enable_roce_set(nlsk_fd, devlink_id, addr, 0);
        if (ret)
                DRV_LOG(DEBUG, "Failed to disable ROCE by Netlink: %d.", ret);
        else
                DRV_LOG(INFO, "ROCE was successfully disabled by Netlink.");
close:
        close(nlsk_fd);
        return ret;
}

/* Try to disable ROCE by sysfs. */
static int
mlx5_vdpa_sys_roce_disable(const char *addr)
{
        FILE *file_o;
        int enable;
        int ret;

        MKSTR(file_p, "/sys/bus/pci/devices/%s/roce_enable", addr);
        file_o = fopen(file_p, "rb");
        if (!file_o) {
                rte_errno = ENOTSUP;
                return -ENOTSUP;
        }
        ret = fscanf(file_o, "%d", &enable);
        if (ret != 1) {
                rte_errno = EINVAL;
                ret = EINVAL;
                goto close;
        } else if (!enable) {
                ret = 0;
                DRV_LOG(INFO, "ROCE is already disabled (sysfs).");
                goto close;
        }
        fclose(file_o);
        file_o = fopen(file_p, "wb");
        if (!file_o) {
                rte_errno = ENOTSUP;
                return -ENOTSUP;
        }
        fprintf(file_o, "0\n");
        ret = 0;
close:
        if (ret)
                DRV_LOG(DEBUG, "Failed to disable ROCE by sysfs: %d.", ret);
        else
                DRV_LOG(INFO, "ROCE was successfully disabled by sysfs.");
        fclose(file_o);
        return ret;
}

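/*
 * Disable ROCE for the device, first by Netlink/devlink and then by sysfs; the
 * disable triggers a device reload, so poll until the IB device shows up again.
 */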
#define MLX5_VDPA_MAX_RETRIES 20
#define MLX5_VDPA_USEC 1000
static int
mlx5_vdpa_roce_disable(struct rte_pci_addr *addr, struct ibv_device **ibv)
{
        char addr_name[64] = {0};

        rte_pci_device_name(addr, addr_name, sizeof(addr_name));
        /* First try to disable ROCE by Netlink and fall back to sysfs. */
        if (mlx5_vdpa_nl_roce_disable(addr_name) == 0 ||
            mlx5_vdpa_sys_roce_disable(addr_name) == 0) {
                /*
                 * Succeeded to disable ROCE; wait for the IB device to appear
                 * again after the reload.
                 */
                int r;
                struct ibv_device *ibv_new;

                for (r = MLX5_VDPA_MAX_RETRIES; r; r--) {
                        ibv_new = mlx5_vdpa_get_ib_device_match(addr);
                        if (ibv_new) {
                                *ibv = ibv_new;
                                return 0;
                        }
                        usleep(MLX5_VDPA_USEC);
                }
                DRV_LOG(ERR, "Cannot match device %s after ROCE disable, "
                        "retries exceed %d.", addr_name, MLX5_VDPA_MAX_RETRIES);
                rte_errno = EAGAIN;
        }
        return -rte_errno;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns a vdpa device out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_vdpa_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, 1 to skip this driver, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                    struct rte_pci_device *pci_dev __rte_unused)
{
        struct ibv_device *ibv;
        struct mlx5_vdpa_priv *priv = NULL;
        struct ibv_context *ctx = NULL;
        struct mlx5_hca_attr attr;
        int ret;

        if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_VDPA) {
                DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5"
                        " driver.");
                return 1;
        }
        ibv = mlx5_vdpa_get_ib_device_match(&pci_dev->addr);
        if (!ibv) {
                DRV_LOG(ERR, "No matching IB device for PCI slot "
                        PCI_PRI_FMT ".", pci_dev->addr.domain,
                        pci_dev->addr.bus, pci_dev->addr.devid,
                        pci_dev->addr.function);
                return -rte_errno;
        } else {
                DRV_LOG(INFO, "PCI information matches for device \"%s\".",
                        ibv->name);
        }
        if (mlx5_vdpa_roce_disable(&pci_dev->addr, &ibv) != 0) {
                DRV_LOG(WARNING, "Failed to disable ROCE for \"%s\".",
                        ibv->name);
                return -rte_errno;
        }
        ctx = mlx5_glue->dv_open_device(ibv);
        if (!ctx) {
                DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
                rte_errno = ENODEV;
                return -rte_errno;
        }
        ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
        if (ret) {
                DRV_LOG(ERR, "Unable to read HCA capabilities.");
                rte_errno = ENOTSUP;
                goto error;
        } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
                DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
                        "old FW/OFED version?");
                rte_errno = ENOTSUP;
                goto error;
        }
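        /* Room for two virtqueue contexts (RX and TX) per supported queue pair. */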
        priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
                           sizeof(struct mlx5_vdpa_virtq) *
                           attr.vdpa.max_num_virtio_queues * 2,
                           RTE_CACHE_LINE_SIZE);
        if (!priv) {
                DRV_LOG(ERR, "Failed to allocate private memory.");
                rte_errno = ENOMEM;
                goto error;
        }
        priv->caps = attr.vdpa;
        priv->log_max_rqt_size = attr.log_max_rqt_size;
        priv->ctx = ctx;
        priv->dev_addr.pci_addr = pci_dev->addr;
        priv->dev_addr.type = VDPA_ADDR_PCI;
        priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
        if (!priv->var) {
                DRV_LOG(ERR, "Failed to allocate VAR, error %d.", errno);
                goto error;
        }
        priv->id = rte_vdpa_register_device(&priv->dev_addr, &mlx5_vdpa_ops);
        if (priv->id < 0) {
                DRV_LOG(ERR, "Failed to register vDPA device.");
                rte_errno = rte_errno ? rte_errno : EINVAL;
                goto error;
        }
        SLIST_INIT(&priv->mr_list);
        pthread_mutex_lock(&priv_list_lock);
        TAILQ_INSERT_TAIL(&priv_list, priv, next);
        pthread_mutex_unlock(&priv_list_lock);
        return 0;

error:
        if (priv) {
                if (priv->var)
                        mlx5_glue->dv_free_var(priv->var);
                rte_free(priv);
        }
        if (ctx)
                mlx5_glue->close_device(ctx);
        return -rte_errno;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all vDPA devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
{
        struct mlx5_vdpa_priv *priv = NULL;
        int found = 0;

        pthread_mutex_lock(&priv_list_lock);
        TAILQ_FOREACH(priv, &priv_list, next) {
                if (memcmp(&priv->dev_addr.pci_addr, &pci_dev->addr,
                           sizeof(pci_dev->addr)) == 0) {
                        found = 1;
                        break;
                }
        }
        if (found)
                TAILQ_REMOVE(&priv_list, priv, next);
        pthread_mutex_unlock(&priv_list_lock);
        if (found) {
                if (priv->configured)
                        mlx5_vdpa_dev_close(priv->vid);
                if (priv->var) {
                        mlx5_glue->dv_free_var(priv->var);
                        priv->var = NULL;
                }
                mlx5_glue->close_device(priv->ctx);
                rte_free(priv);
        }
        return 0;
}

static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
        },
        {
                .vendor_id = 0
        }
};

static struct rte_pci_driver mlx5_vdpa_driver = {
        .driver = {
                .name = "mlx5_vdpa",
        },
        .id_table = mlx5_vdpa_pci_id_map,
        .probe = mlx5_vdpa_pci_probe,
        .remove = mlx5_vdpa_pci_remove,
        .drv_flags = 0,
};

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_vdpa_init)
{
        /* Initialize common log type. */
        mlx5_vdpa_logtype = rte_log_register("pmd.vdpa.mlx5");
        if (mlx5_vdpa_logtype >= 0)
                rte_log_set_level(mlx5_vdpa_logtype, RTE_LOG_NOTICE);
        if (mlx5_glue)
                rte_pci_register(&mlx5_vdpa_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5_vdpa, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5_vdpa, mlx5_vdpa_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5_vdpa, "* ib_uverbs & mlx5_core & mlx5_ib");