1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2019 Mellanox Technologies, Ltd
6 #include <sys/socket.h>
10 #include <netinet/in.h>
12 #include <rte_malloc.h>
14 #include <rte_errno.h>
15 #include <rte_string_fns.h>
16 #include <rte_bus_pci.h>
18 #include <mlx5_glue.h>
19 #include <mlx5_common.h>
20 #include <mlx5_common_defs.h>
21 #include <mlx5_devx_cmds.h>
25 #include "mlx5_vdpa_utils.h"
26 #include "mlx5_vdpa.h"
#define MLX5_VDPA_DRIVER_NAME vdpa_mlx5

/* Virtio/vhost feature bits unconditionally advertised by this driver;
 * capability-dependent bits (TSO, checksum, packed ring, ...) are added
 * at query time in mlx5_vdpa_get_vdpa_features(). */
#define MLX5_VDPA_DEFAULT_FEATURES ((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
			(1ULL << VIRTIO_F_ANY_LAYOUT) | \
			(1ULL << VIRTIO_NET_F_MQ) | \
			(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
			(1ULL << VIRTIO_F_ORDER_PLATFORM) | \
			(1ULL << VHOST_F_LOG_ALL) | \
			(1ULL << VIRTIO_NET_F_MTU))

/* vhost-user protocol features supported by this vDPA backend. */
#define MLX5_VDPA_PROTOCOL_FEATURES \
			((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
			 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
			 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
			 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \
			 (1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
			 (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU) | \
			 (1ULL << VHOST_USER_PROTOCOL_F_STATUS))

/* Default for the "no_traffic_time" devarg (see
 * mlx5_vdpa_args_check_handler() / priv->no_traffic_max). */
#define MLX5_VDPA_DEFAULT_NO_TRAFFIC_MAX 16LLU

/* Global list of probed device contexts; all traversals and updates are
 * done under priv_list_lock. */
TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
					  TAILQ_HEAD_INITIALIZER(priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Forward declaration: full teardown used by probe error path and remove. */
static void mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv);
/*
 * Look up the driver private context registered for @vdev by scanning
 * the global priv_list under priv_list_lock.
 *
 * NOTE(review): several lines of this function are elided in this chunk;
 * the match / unlock / return control flow is only partially visible.
 * Presumably the DRV_LOG below is the not-found error path — confirm
 * against the full source.
 */
static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
	struct mlx5_vdpa_priv *priv;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &priv_list, next) {
		if (vdev == priv->vdev) {
	pthread_mutex_unlock(&priv_list_lock);
	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
/*
 * vDPA .get_queue_num op: report the device's virtqueue capacity from
 * the cached DevX capabilities (caps.max_num_virtio_queues).
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
	*queue_num = priv->caps.max_num_virtio_queues;
/*
 * vDPA .get_features op: start from the driver's constant default
 * feature set and OR in bits that the HW capabilities actually support.
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features)
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
	*features = MLX5_VDPA_DEFAULT_FEATURES;
	/* Each capability bit gates the corresponding virtio feature. */
	if (priv->caps.virtio_queue_type & (1 << MLX5_VIRTQ_TYPE_PACKED))
		*features |= (1ULL << VIRTIO_F_RING_PACKED);
	if (priv->caps.tso_ipv4)
		*features |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
	if (priv->caps.tso_ipv6)
		*features |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
	if (priv->caps.tx_csum)
		*features |= (1ULL << VIRTIO_NET_F_CSUM);
	if (priv->caps.rx_csum)
		*features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
	if (priv->caps.virtio_version_1_0)
		*features |= (1ULL << VIRTIO_F_VERSION_1);
/*
 * vDPA .get_protocol_features op: the supported vhost-user protocol
 * feature set is a driver constant (no per-device variation).
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,

	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
	*features = MLX5_VDPA_PROTOCOL_FEATURES;
/*
 * vDPA .set_vring_state op: enable/disable one virtq.
 * The vring index is validated against 2 * max_num_virtio_queues
 * (RX + TX ring per queue pair), and the actual (de)configuration is
 * serialized with the event thread via vq_config_lock.
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_set_vring_state(int vid, int vring, int state)
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
		DRV_LOG(ERR, "Too big vring id: %d.", vring);
	pthread_mutex_lock(&priv->vq_config_lock);
	ret = mlx5_vdpa_virtq_enable(priv, vring, state);
	pthread_mutex_unlock(&priv->vq_config_lock);
155 mlx5_vdpa_features_set(int vid)
157 struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
158 struct mlx5_vdpa_priv *priv =
159 mlx5_vdpa_find_priv_resource_by_vdev(vdev);
160 uint64_t log_base, log_size;
165 DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
168 ret = rte_vhost_get_negotiated_features(vid, &features);
170 DRV_LOG(ERR, "Failed to get negotiated features.");
173 if (RTE_VHOST_NEED_LOG(features)) {
174 ret = rte_vhost_get_log_base(vid, &log_base, &log_size);
176 DRV_LOG(ERR, "Failed to get log base.");
179 ret = mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size);
181 DRV_LOG(ERR, "Failed to set dirty bitmap.");
184 DRV_LOG(INFO, "mlx5 vdpa: enabling dirty logging...");
185 ret = mlx5_vdpa_logging_enable(priv, 1);
187 DRV_LOG(ERR, "Failed t enable dirty logging.");
/*
 * Propagate the MTU negotiated by vhost to the kernel representor
 * netdev: resolve the interface name from sysfs, then use
 * SIOCGIFMTU/SIOCSIFMTU ioctls, retrying up to MLX5_VDPA_MAX_RETRIES
 * with a MLX5_VDPA_USEC sleep between attempts.
 * Returns 0 when the kernel MTU matches the vhost MTU, -1 otherwise.
 * (Some guard/loop lines are elided in this chunk.)
 */
mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)
	struct ifreq request;
	uint16_t vhost_mtu = 0;
	uint16_t kern_mtu = 0;
	int ret = rte_vhost_get_mtu(priv->vid, &vhost_mtu);
	int retries = MLX5_VDPA_MAX_RETRIES;

	DRV_LOG(DEBUG, "Cannot get vhost MTU - %d.", ret);
	DRV_LOG(DEBUG, "Vhost MTU is 0.");
	ret = mlx5_get_ifname_sysfs
		(mlx5_os_get_ctx_device_name(priv->cdev->ctx),
	DRV_LOG(DEBUG, "Cannot get kernel IF name - %d.", ret);
	sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	DRV_LOG(DEBUG, "Cannot open IF socket.");
	/* Read the current kernel MTU, then set it if it differs. */
	ret = ioctl(sock, SIOCGIFMTU, &request);
	kern_mtu = request.ifr_mtu;
	DRV_LOG(DEBUG, "MTU: current %d requested %d.", (int)kern_mtu,
	if (kern_mtu == vhost_mtu)
	request.ifr_mtu = vhost_mtu;
	ret = ioctl(sock, SIOCSIFMTU, &request);
	usleep(MLX5_VDPA_USEC);
	return kern_mtu == vhost_mtu ? 0 : -1;
/* Drop cached per-connection resources: virtq objects and registered
 * guest memory.  Called on close (when disconnected) and on cleanup. */
mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv)
	mlx5_vdpa_virtqs_cleanup(priv);
	mlx5_vdpa_mem_dereg(priv);
/*
 * vDPA .dev_close op: tear down the datapath for @vid.
 * Flushes the LM dirty log if the device was configured, releases
 * steering/virtqs/LM mkey, and moves the state back to PROBED.
 * Cached resources are only dropped when the socket is disconnected.
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_dev_close(int vid)
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
	mlx5_vdpa_cqe_event_unset(priv);
	if (priv->state == MLX5_VDPA_STATE_CONFIGURED) {
		ret |= mlx5_vdpa_lm_log(priv);
		priv->state = MLX5_VDPA_STATE_IN_PROGRESS;
	mlx5_vdpa_steer_unset(priv);
	mlx5_vdpa_virtqs_release(priv);
	if (priv->lm_mr.addr)
		mlx5_os_wrapped_mkey_destroy(&priv->lm_mr);
	priv->state = MLX5_VDPA_STATE_PROBED;
	if (!priv->connected)
		mlx5_vdpa_dev_cache_clean(priv);
	/* The mutex may stay locked after event thread cancel - initiate it. */
	pthread_mutex_init(&priv->vq_config_lock, NULL);
	DRV_LOG(INFO, "vDPA device %d was closed.", vid);
/*
 * vDPA .dev_conf op: (re)configure the datapath for @vid.
 * If already configured, close first; then best-effort MTU sync,
 * followed by memory registration, virtq preparation, steering setup
 * and CQE event setup.  Any failure rolls back via dev_close.
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_dev_config(int vid)
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
	if (priv->state == MLX5_VDPA_STATE_CONFIGURED &&
	    mlx5_vdpa_dev_close(vid)) {
		DRV_LOG(ERR, "Failed to reconfigure vid %d.", vid);
	priv->connected = true;
	/* MTU mismatch is non-fatal: warn and continue. */
	if (mlx5_vdpa_mtu_set(priv))
		DRV_LOG(WARNING, "MTU cannot be set on device %s.",
	if (mlx5_vdpa_mem_register(priv) ||
	    mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
	    mlx5_vdpa_cqe_event_setup(priv)) {
		mlx5_vdpa_dev_close(vid);
	priv->state = MLX5_VDPA_STATE_CONFIGURED;
	DRV_LOG(INFO, "vDPA device %d was configured.", vid);
/*
 * vDPA .get_vfio_device_fd op: expose the verbs command fd of the
 * device context (used by vhost for host notifier mapping).
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_get_device_fd(int vid)
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
	return ((struct ibv_context *)priv->cdev->ctx)->cmd_fd;
/*
 * vDPA .get_notify_area op: report the mmap offset/length of the VAR
 * (doorbell) area allocated in mlx5_vdpa_create_dev_resources().
 * Fails when the VAR was not created yet.
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
	DRV_LOG(ERR, "VAR was not created for device %s, is the device"
		" configured?.", vdev->device->name);
	*offset = priv->var->mmap_off;
	*size = priv->var->length;
/*
 * vDPA .get_stats_names op: copy up to @size statistic names into
 * @stats_names and return the total number of statistics
 * (MLX5_VDPA_STATS_MAX).  Per rte_vdpa convention, a NULL/zero request
 * presumably returns the count only — confirm against the elided lines.
 */
mlx5_vdpa_get_stats_names(struct rte_vdpa_device *vdev,
			  struct rte_vdpa_stat_name *stats_names,

	static const char *mlx5_vdpa_stats_names[MLX5_VDPA_STATS_MAX] = {
		"received_descriptors",
		"completed_descriptors",
		"bad descriptor errors",

	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
	return MLX5_VDPA_STATS_MAX;
	size = RTE_MIN(size, (unsigned int)MLX5_VDPA_STATS_MAX);
	for (i = 0; i < size; ++i)
		strlcpy(stats_names[i].name, mlx5_vdpa_stats_names[i],
			RTE_VDPA_STATS_NAME_SIZE);
/*
 * vDPA .get_stats op: fetch HW virtq counters for @qid after validating
 * the ring index and that the device exposes queue counters.
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
		    struct rte_vdpa_stat *stats, unsigned int n)
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
	if (!priv->caps.queue_counters_valid) {
		DRV_LOG(ERR, "Virtq statistics is not supported for device %s.",
	return mlx5_vdpa_virtq_stats_get(priv, qid, stats, n);
/*
 * vDPA .reset_stats op: reset HW virtq counters for @qid; same
 * validation as mlx5_vdpa_get_stats().
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
	if (!priv->caps.queue_counters_valid) {
		DRV_LOG(ERR, "Virtq statistics is not supported for device %s.",
	return mlx5_vdpa_virtq_stats_reset(priv, qid);
/*
 * vDPA .dev_cleanup op: called on socket disconnect.  Drops cached
 * resources if the device already returned to PROBED state and marks
 * the connection as closed.
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_dev_cleanup(int vid)
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv;

	priv = mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
	if (priv->state == MLX5_VDPA_STATE_PROBED)
		mlx5_vdpa_dev_cache_clean(priv);
	priv->connected = false;
/* vDPA operation table registered with the vhost library via
 * rte_vdpa_register_device().  VFIO group fd and migration_done are
 * intentionally unimplemented (NULL). */
static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
	.get_queue_num = mlx5_vdpa_get_queue_num,
	.get_features = mlx5_vdpa_get_vdpa_features,
	.get_protocol_features = mlx5_vdpa_get_protocol_features,
	.dev_conf = mlx5_vdpa_dev_config,
	.dev_close = mlx5_vdpa_dev_close,
	.dev_cleanup = mlx5_vdpa_dev_cleanup,
	.set_vring_state = mlx5_vdpa_set_vring_state,
	.set_features = mlx5_vdpa_features_set,
	.migration_done = NULL,
	.get_vfio_group_fd = NULL,
	.get_vfio_device_fd = mlx5_vdpa_get_device_fd,
	.get_notify_area = mlx5_vdpa_get_notify_area,
	.get_stats_names = mlx5_vdpa_get_stats_names,
	.get_stats = mlx5_vdpa_get_stats,
	.reset_stats = mlx5_vdpa_reset_stats,
/*
 * kvargs callback: parse one "key=val" devarg into the private context.
 * Unsigned integer values are parsed with strtoul(); invalid values are
 * warned about and (presumably) ignored — some guard lines are elided
 * in this chunk.
 */
mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)
	struct mlx5_vdpa_priv *priv = opaque;

	int n_cores = sysconf(_SC_NPROCESSORS_ONLN);

	tmp = strtoul(val, NULL, 0);
	DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
	if (strcmp(key, "event_mode") == 0) {
		if (tmp <= MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT)
			priv->event_mode = (int)tmp;
		DRV_LOG(WARNING, "Invalid event_mode %s.", val);
	} else if (strcmp(key, "event_us") == 0) {
		priv->event_us = (uint32_t)tmp;
	} else if (strcmp(key, "no_traffic_time") == 0) {
		priv->no_traffic_max = (uint32_t)tmp;
	} else if (strcmp(key, "event_core") == 0) {
		/* Reject cores beyond the online CPU count. */
		if (tmp >= (unsigned long)n_cores)
			DRV_LOG(WARNING, "Invalid event_core %s.", val);
		priv->event_core = tmp;
	} else if (strcmp(key, "hw_latency_mode") == 0) {
		priv->hw_latency_mode = (uint32_t)tmp;
	} else if (strcmp(key, "hw_max_latency_us") == 0) {
		priv->hw_max_latency_us = (uint32_t)tmp;
	} else if (strcmp(key, "hw_max_pending_comp") == 0) {
		priv->hw_max_pending_comp = (uint32_t)tmp;
/*
 * Initialize tunable defaults and then override them from the device
 * arguments via mlx5_kvargs_process() / mlx5_vdpa_args_check_handler().
 * event_us falls back to the default timer step only in dynamic-timer
 * mode.  (Part of the params list is elided in this chunk.)
 */
mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,
		     struct mlx5_vdpa_priv *priv)
	const char **params = (const char *[]){
		"hw_max_pending_comp",

	/* Defaults before devargs parsing. */
	priv->event_mode = MLX5_VDPA_EVENT_MODE_FIXED_TIMER;
	priv->event_core = -1;
	priv->no_traffic_max = MLX5_VDPA_DEFAULT_NO_TRAFFIC_MAX;

	mlx5_kvargs_process(mkvlist, params, mlx5_vdpa_args_check_handler,
	if (!priv->event_us &&
	    priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER)
		priv->event_us = MLX5_VDPA_DEFAULT_TIMER_STEP_US;
	DRV_LOG(DEBUG, "event mode is %d.", priv->event_mode);
	DRV_LOG(DEBUG, "event_us is %u us.", priv->event_us);
	DRV_LOG(DEBUG, "no traffic max is %u.", priv->no_traffic_max);
/*
 * Allocate all per-device HW resources at probe time:
 *  - VAR (doorbell area), retried up to 7 times with exponential backoff
 *    because QEMU may still hold it across a vdpa restart;
 *  - mmap of the whole VAR page for virtq doorbells;
 *  - transport domain (TD) and one TIS per LAG port (affinity 1-based);
 *  - a null MR used as the dump fill mkey;
 *  - DR RX steering domain + root flow table (when HAVE_MLX5DV_DR);
 *  - error event channel and global event QP resources.
 * (Error-path/cleanup lines are elided in this chunk.)
 */
mlx5_vdpa_create_dev_resources(struct mlx5_vdpa_priv *priv)
	struct mlx5_devx_tis_attr tis_attr = {0};
	struct ibv_context *ctx = priv->cdev->ctx;

	for (retry = 0; retry < 7; retry++) {
		priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
		if (priv->var != NULL)
		DRV_LOG(WARNING, "Failed to allocate VAR, retry %d.", retry);
		/* Wait Qemu release VAR during vdpa restart, 0.1 sec based. */
		usleep(100000U << retry);
	DRV_LOG(ERR, "Failed to allocate VAR %u.", errno);
	/* Always map the entire page. */
	priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
				   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,
				   priv->var->mmap_off);
	if (priv->virtq_db_addr == MAP_FAILED) {
		DRV_LOG(ERR, "Failed to map doorbell page %u.", errno);
		priv->virtq_db_addr = NULL;
	DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
		priv->virtq_db_addr);
	priv->td = mlx5_devx_cmd_create_td(ctx);
	DRV_LOG(ERR, "Failed to create transport domain.");
	tis_attr.transport_domain = priv->td->id;
	for (i = 0; i < priv->num_lag_ports; i++) {
		/* 0 is auto affinity, non-zero value to propose port. */
		tis_attr.lag_tx_port_affinity = i + 1;
		priv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
		if (!priv->tiss[i]) {
			DRV_LOG(ERR, "Failed to create TIS %u.", i);
	priv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);
	if (!priv->null_mr) {
		DRV_LOG(ERR, "Failed to allocate null MR.");
	DRV_LOG(DEBUG, "Dump fill Mkey = %u.", priv->null_mr->lkey);
#ifdef HAVE_MLX5DV_DR
	priv->steer.domain = mlx5_glue->dr_create_domain(ctx,
					MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	if (!priv->steer.domain) {
		DRV_LOG(ERR, "Failed to create Rx domain.");
	priv->steer.tbl = mlx5_glue->dr_create_flow_tbl(priv->steer.domain, 0);
	if (!priv->steer.tbl) {
		DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
	if (mlx5_vdpa_err_event_setup(priv) != 0)
	if (mlx5_vdpa_event_qp_global_prepare(priv))
/*
 * Class-driver probe: validate vDPA HCA capabilities, allocate the
 * private context (with trailing virtq array sized for RX+TX rings),
 * create HW resources, register the vDPA device with vhost, parse
 * devargs and link the context into the global priv_list.
 * On failure the partially built context is released via
 * mlx5_vdpa_dev_release().
 * (Error-path guard lines are elided in this chunk.)
 */
mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
		    struct mlx5_kvargs_ctrl *mkvlist)
	struct mlx5_vdpa_priv *priv = NULL;
	struct mlx5_hca_attr *attr = &cdev->config.hca_attr;

	if (!attr->vdpa.valid || !attr->vdpa.max_num_virtio_queues) {
		DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
			"old FW/OFED version?");
	if (!attr->vdpa.queue_counters_valid)
		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
	/* Trailing storage: two virtqs (RX + TX) per queue pair. */
	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
			   sizeof(struct mlx5_vdpa_virtq) *
			   attr->vdpa.max_num_virtio_queues * 2,
			   RTE_CACHE_LINE_SIZE);
	DRV_LOG(ERR, "Failed to allocate private memory.");
	priv->caps = attr->vdpa;
	priv->log_max_rqt_size = attr->log_max_rqt_size;
	priv->num_lag_ports = attr->num_lag_ports;
	/* FW may report 0 LAG ports; treat as a single port. */
	if (attr->num_lag_ports == 0)
		priv->num_lag_ports = 1;
	pthread_mutex_init(&priv->vq_config_lock, NULL);
	if (mlx5_vdpa_create_dev_resources(priv))
	priv->vdev = rte_vdpa_register_device(cdev->dev, &mlx5_vdpa_ops);
	if (priv->vdev == NULL) {
		DRV_LOG(ERR, "Failed to register vDPA device.");
		rte_errno = rte_errno ? rte_errno : EINVAL;
	mlx5_vdpa_config_get(mkvlist, priv);
	SLIST_INIT(&priv->mr_list);
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	/* Error path: release everything allocated above. */
	mlx5_vdpa_dev_release(priv);
/*
 * Class-driver remove: locate the context owning @cdev in the global
 * list, unlink it under priv_list_lock and release all its resources.
 * (Match/break lines are elided in this chunk.)
 */
mlx5_vdpa_dev_remove(struct mlx5_common_device *cdev)
	struct mlx5_vdpa_priv *priv = NULL;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &priv_list, next) {
		if (priv->vdev->device == cdev->dev) {
	TAILQ_REMOVE(&priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	mlx5_vdpa_dev_release(priv);
/*
 * Mirror of mlx5_vdpa_create_dev_resources(): destroy per-virtq
 * counters, global event QP resources, error event channel, steering
 * objects, null MR, TIS objects, TD, the doorbell mmap and the VAR —
 * each guarded by its existence (NULL checks partially elided in this
 * chunk).
 */
mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)

	mlx5_vdpa_dev_cache_clean(priv);
	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
		if (!priv->virtqs[i].counters)
		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
	mlx5_vdpa_event_qp_global_release(priv);
	mlx5_vdpa_err_event_unset(priv);
	claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));
	if (priv->steer.domain)
		claim_zero(mlx5_glue->dr_destroy_domain(priv->steer.domain));
	claim_zero(mlx5_glue->dereg_mr(priv->null_mr));
	for (i = 0; i < priv->num_lag_ports; i++) {
		claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
	claim_zero(mlx5_devx_cmd_destroy(priv->td));
	if (priv->virtq_db_addr)
		claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
	mlx5_glue->dv_free_var(priv->var);
/*
 * Full teardown of one device context: close the active session if
 * configured, release HW resources, unregister from vhost and destroy
 * the config mutex.  (Final free of @priv is elided in this chunk.)
 */
mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)
	if (priv->state == MLX5_VDPA_STATE_CONFIGURED)
		mlx5_vdpa_dev_close(priv->vid);
	mlx5_vdpa_release_dev_resources(priv);
	rte_vdpa_unregister_device(priv->vdev);
	pthread_mutex_destroy(&priv->vq_config_lock);
/* PCI IDs supported by this driver: ConnectX-6 and newer NICs/DPUs
 * (PF and VF variants, including BlueField parts). */
static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7)
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
/* mlx5 common-class driver descriptor tying probe/remove to the vDPA
 * class for the PCI IDs above. */
static struct mlx5_class_driver mlx5_vdpa_driver = {
	.drv_class = MLX5_CLASS_VDPA,
	.name = RTE_STR(MLX5_VDPA_DRIVER_NAME),
	.id_table = mlx5_vdpa_pci_id_map,
	.probe = mlx5_vdpa_dev_probe,
	.remove = mlx5_vdpa_dev_remove,

RTE_LOG_REGISTER_DEFAULT(mlx5_vdpa_logtype, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_vdpa_init)

	mlx5_class_driver_register(&mlx5_vdpa_driver);

RTE_PMD_EXPORT_NAME(MLX5_VDPA_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_VDPA_DRIVER_NAME, mlx5_vdpa_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_VDPA_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");