/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <netinet/in.h>

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_bus_pci.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_pci.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_nl.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

#define MLX5_VDPA_DEFAULT_FEATURES ((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
			    (1ULL << VIRTIO_F_ANY_LAYOUT) | \
			    (1ULL << VIRTIO_NET_F_MQ) | \
			    (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
			    (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
			    (1ULL << VHOST_F_LOG_ALL) | \
			    (1ULL << VIRTIO_NET_F_MTU))

#define MLX5_VDPA_PROTOCOL_FEATURES \
			    ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_STATUS))

#define MLX5_VDPA_MAX_RETRIES 20
#define MLX5_VDPA_USEC 1000
#define MLX5_VDPA_DEFAULT_NO_TRAFFIC_TIME_S 2LLU

TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
					  TAILQ_HEAD_INITIALIZER(priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

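/* Look up the driver private structure attached to a vDPA device handle. */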
static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
{
	struct mlx5_vdpa_priv *priv;
	int found = 0;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &priv_list, next) {
		if (vdev == priv->vdev) {
			found = 1;
			break;
		}
	}
	pthread_mutex_unlock(&priv_list_lock);
	if (!found) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		rte_errno = EINVAL;
		return NULL;
	}
	return priv;
}

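/* vDPA framework callback: report the number of virtio queues the HW supports. */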
static int
mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -1;
	}
	*queue_num = priv->caps.max_num_virtio_queues;
	return 0;
}

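/* vDPA framework callback: build the virtio feature set from the defaults plus HW capabilities. */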
static int
mlx5_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -1;
	}
	*features = MLX5_VDPA_DEFAULT_FEATURES;
	if (priv->caps.virtio_queue_type & (1 << MLX5_VIRTQ_TYPE_PACKED))
		*features |= (1ULL << VIRTIO_F_RING_PACKED);
	if (priv->caps.tso_ipv4)
		*features |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
	if (priv->caps.tso_ipv6)
		*features |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
	if (priv->caps.tx_csum)
		*features |= (1ULL << VIRTIO_NET_F_CSUM);
	if (priv->caps.rx_csum)
		*features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
	if (priv->caps.virtio_version_1_0)
		*features |= (1ULL << VIRTIO_F_VERSION_1);
	return 0;
}

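/* vDPA framework callback: report the supported vhost-user protocol features. */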
static int
mlx5_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
		uint64_t *features)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -1;
	}
	*features = MLX5_VDPA_PROTOCOL_FEATURES;
	return 0;
}

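/* vDPA framework callback: enable or disable a single virtqueue. */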
static int
mlx5_vdpa_set_vring_state(int vid, int vring, int state)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	int ret;

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
		DRV_LOG(ERR, "Too big vring id: %d.", vring);
		return -E2BIG;
	}
	pthread_mutex_lock(&priv->vq_config_lock);
	ret = mlx5_vdpa_virtq_enable(priv, vring, state);
	pthread_mutex_unlock(&priv->vq_config_lock);
	return ret;
}

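/*
 * vDPA framework callback: apply the negotiated features and start dirty-page
 * logging when live migration requires it.
 */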
static int
mlx5_vdpa_features_set(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	uint64_t log_base, log_size;
	uint64_t features;
	int ret;

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	ret = rte_vhost_get_negotiated_features(vid, &features);
	if (ret) {
		DRV_LOG(ERR, "Failed to get negotiated features.");
		return ret;
	}
	if (RTE_VHOST_NEED_LOG(features)) {
		ret = rte_vhost_get_log_base(vid, &log_base, &log_size);
		if (ret) {
			DRV_LOG(ERR, "Failed to get log base.");
			return ret;
		}
		ret = mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size);
		if (ret) {
			DRV_LOG(ERR, "Failed to set dirty bitmap.");
			return ret;
		}
		DRV_LOG(INFO, "mlx5 vdpa: enabling dirty logging...");
		ret = mlx5_vdpa_logging_enable(priv, 1);
		if (ret) {
			DRV_LOG(ERR, "Failed to enable dirty logging.");
			return ret;
		}
	}
	return 0;
}

static int
mlx5_vdpa_pd_create(struct mlx5_vdpa_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
	if (priv->pd == NULL) {
		DRV_LOG(ERR, "Failed to allocate PD.");
		return errno ? -errno : -ENOMEM;
	}
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	obj.pd.in = priv->pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(ERR, "Failed to get PD object info.");
		mlx5_glue->dealloc_pd(priv->pd);
		priv->pd = NULL;
		return -errno;
	}
	priv->pdn = pd_info.pdn;
	return 0;
#else
	(void)priv;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
}

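/*
 * Align the kernel netdev MTU with the MTU negotiated by vhost, retrying until
 * the kernel reports the requested value or the retry budget is exhausted.
 */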
static int
mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)
{
	struct ifreq request;
	uint16_t vhost_mtu = 0;
	uint16_t kern_mtu = 0;
	int ret = rte_vhost_get_mtu(priv->vid, &vhost_mtu);
	int sock;
	int retries = MLX5_VDPA_MAX_RETRIES;

	if (ret) {
		DRV_LOG(DEBUG, "Cannot get vhost MTU - %d.", ret);
		return ret;
	}
	if (!vhost_mtu) {
		DRV_LOG(DEBUG, "Vhost MTU is 0.");
		return ret;
	}
	ret = mlx5_get_ifname_sysfs(priv->ctx->device->ibdev_path,
				    request.ifr_name);
	if (ret) {
		DRV_LOG(DEBUG, "Cannot get kernel IF name - %d.", ret);
		return ret;
	}
	sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	if (sock == -1) {
		DRV_LOG(DEBUG, "Cannot open IF socket.");
		return sock;
	}
	while (retries--) {
		ret = ioctl(sock, SIOCGIFMTU, &request);
		if (ret == -1)
			break;
		kern_mtu = request.ifr_mtu;
		DRV_LOG(DEBUG, "MTU: current %d requested %d.", (int)kern_mtu,
			(int)vhost_mtu);
		if (kern_mtu == vhost_mtu)
			break;
		request.ifr_mtu = vhost_mtu;
		ret = ioctl(sock, SIOCSIFMTU, &request);
		if (ret == -1)
			break;
		request.ifr_mtu = 0;
		usleep(MLX5_VDPA_USEC);
	}
	close(sock);
	return kern_mtu == vhost_mtu ? 0 : -1;
}

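/* vDPA framework callback: release all resources created by mlx5_vdpa_dev_config(). */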
static int
mlx5_vdpa_dev_close(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	int ret = 0;

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -1;
	}
	if (priv->configured)
		ret |= mlx5_vdpa_lm_log(priv);
	mlx5_vdpa_err_event_unset(priv);
	mlx5_vdpa_cqe_event_unset(priv);
	mlx5_vdpa_steer_unset(priv);
	mlx5_vdpa_virtqs_release(priv);
	mlx5_vdpa_event_qp_global_release(priv);
	mlx5_vdpa_mem_dereg(priv);
	if (priv->pd) {
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		priv->pd = NULL;
	}
	priv->configured = 0;
	priv->vid = 0;
	/* The mutex may stay locked after event thread cancel - re-initialize it. */
	pthread_mutex_init(&priv->vq_config_lock, NULL);
	DRV_LOG(INFO, "vDPA device %d was closed.", vid);
	return ret;
}

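/*
 * vDPA framework callback: set up memory registration, virtqueues, steering and
 * event handling for a newly assigned vhost connection.
 */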
static int
mlx5_vdpa_dev_config(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	if (priv->configured && mlx5_vdpa_dev_close(vid)) {
		DRV_LOG(ERR, "Failed to reconfigure vid %d.", vid);
		return -1;
	}
	priv->vid = vid;
	if (mlx5_vdpa_mtu_set(priv))
		DRV_LOG(WARNING, "MTU cannot be set on device %s.",
				vdev->device->name);
	if (mlx5_vdpa_pd_create(priv) || mlx5_vdpa_mem_register(priv) ||
	    mlx5_vdpa_err_event_setup(priv) ||
	    mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
	    mlx5_vdpa_cqe_event_setup(priv)) {
		mlx5_vdpa_dev_close(vid);
		return -1;
	}
	priv->configured = 1;
	DRV_LOG(INFO, "vDPA device %d was configured.", vid);
	return 0;
}

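/* vDPA framework callback: expose the device command file descriptor. */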
static int
mlx5_vdpa_get_device_fd(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	return priv->ctx->cmd_fd;
}

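/* vDPA framework callback: expose the VAR area used for HW queue doorbell mapping. */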
static int
mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	RTE_SET_USED(qid);
	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	if (!priv->var) {
		DRV_LOG(ERR, "VAR was not created for device %s, is the device"
			" configured?", vdev->device->name);
		return -EINVAL;
	}
	*offset = priv->var->mmap_off;
	*size = priv->var->length;
	return 0;
}

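/* vDPA framework callback: report the virtq statistics counter names. */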
static int
mlx5_vdpa_get_stats_names(struct rte_vdpa_device *vdev,
		struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	static const char *mlx5_vdpa_stats_names[MLX5_VDPA_STATS_MAX] = {
		"received_descriptors",
		"completed_descriptors",
		"bad descriptor errors",
		"exceed max chain",
		"invalid buffer",
		"completion errors",
	};
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	unsigned int i;

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
		return -ENODEV;
	}
	if (!stats_names)
		return MLX5_VDPA_STATS_MAX;
	size = RTE_MIN(size, (unsigned int)MLX5_VDPA_STATS_MAX);
	for (i = 0; i < size; ++i)
		strlcpy(stats_names[i].name, mlx5_vdpa_stats_names[i],
			RTE_VDPA_STATS_NAME_SIZE);
	return size;
}

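/* vDPA framework callback: read the HW counters of one virtqueue. */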
static int
mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
		struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
		return -ENODEV;
	}
	if (!priv->configured) {
		DRV_LOG(ERR, "Device %s was not configured.",
				vdev->device->name);
		return -ENODATA;
	}
	if (qid >= (int)priv->nr_virtqs) {
		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
				vdev->device->name);
		return -E2BIG;
	}
	if (!priv->caps.queue_counters_valid) {
		DRV_LOG(ERR, "Virtq statistics are not supported for device %s.",
			vdev->device->name);
		return -ENOTSUP;
	}
	return mlx5_vdpa_virtq_stats_get(priv, qid, stats, n);
}

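/* vDPA framework callback: reset the HW counters of one virtqueue. */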
static int
mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
		return -ENODEV;
	}
	if (!priv->configured) {
		DRV_LOG(ERR, "Device %s was not configured.",
				vdev->device->name);
		return -ENODATA;
	}
	if (qid >= (int)priv->nr_virtqs) {
		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
				vdev->device->name);
		return -E2BIG;
	}
	if (!priv->caps.queue_counters_valid) {
		DRV_LOG(ERR, "Virtq statistics are not supported for device %s.",
			vdev->device->name);
		return -ENOTSUP;
	}
	return mlx5_vdpa_virtq_stats_reset(priv, qid);
}

static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
	.get_queue_num = mlx5_vdpa_get_queue_num,
	.get_features = mlx5_vdpa_get_vdpa_features,
	.get_protocol_features = mlx5_vdpa_get_protocol_features,
	.dev_conf = mlx5_vdpa_dev_config,
	.dev_close = mlx5_vdpa_dev_close,
	.set_vring_state = mlx5_vdpa_set_vring_state,
	.set_features = mlx5_vdpa_features_set,
	.migration_done = NULL,
	.get_vfio_group_fd = NULL,
	.get_vfio_device_fd = mlx5_vdpa_get_device_fd,
	.get_notify_area = mlx5_vdpa_get_notify_area,
	.get_stats_names = mlx5_vdpa_get_stats_names,
	.get_stats = mlx5_vdpa_get_stats,
	.reset_stats = mlx5_vdpa_reset_stats,
};

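/* Find the IB device whose bus address matches the probed PCI device. */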
static struct ibv_device *
mlx5_vdpa_get_ib_device_match(struct rte_pci_addr *addr)
{
	int n;
	struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
	struct ibv_device *ibv_match = NULL;

	if (!ibv_list) {
		rte_errno = ENOSYS;
		return NULL;
	}
	while (n-- > 0) {
		struct rte_pci_addr pci_addr;

		DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
		if (mlx5_dev_to_pci_addr(ibv_list[n]->ibdev_path, &pci_addr))
			continue;
		if (rte_pci_addr_cmp(addr, &pci_addr))
			continue;
		ibv_match = ibv_list[n];
		break;
	}
	if (!ibv_match)
		rte_errno = ENOENT;
	mlx5_glue->free_device_list(ibv_list);
	return ibv_match;
}

/* Try to disable ROCE by Netlink/Devlink. */
static int
mlx5_vdpa_nl_roce_disable(const char *addr)
{
	int nlsk_fd = mlx5_nl_init(NETLINK_GENERIC);
	int devlink_id;
	int enable;
	int ret;

	if (nlsk_fd < 0)
		return nlsk_fd;
	devlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);
	if (devlink_id < 0) {
		ret = devlink_id;
		DRV_LOG(DEBUG, "Failed to get devlink id for ROCE operations by"
			" Netlink.");
		goto close;
	}
	ret = mlx5_nl_enable_roce_get(nlsk_fd, devlink_id, addr, &enable);
	if (ret) {
		DRV_LOG(DEBUG, "Failed to get ROCE enable by Netlink: %d.",
			ret);
		goto close;
	} else if (!enable) {
		DRV_LOG(INFO, "ROCE is already disabled (Netlink).");
		goto close;
	}
	ret = mlx5_nl_enable_roce_set(nlsk_fd, devlink_id, addr, 0);
	if (ret)
		DRV_LOG(DEBUG, "Failed to disable ROCE by Netlink: %d.", ret);
	else
		DRV_LOG(INFO, "ROCE is disabled by Netlink successfully.");
close:
	close(nlsk_fd);
	return ret;
}

/* Try to disable ROCE by sysfs. */
static int
mlx5_vdpa_sys_roce_disable(const char *addr)
{
	FILE *file_o;
	int enable;
	int ret;

	MKSTR(file_p, "/sys/bus/pci/devices/%s/roce_enable", addr);
	file_o = fopen(file_p, "rb");
	if (!file_o) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	ret = fscanf(file_o, "%d", &enable);
	if (ret != 1) {
		rte_errno = EINVAL;
		ret = EINVAL;
		goto close;
	} else if (!enable) {
		ret = 0;
		DRV_LOG(INFO, "ROCE is already disabled (sysfs).");
		goto close;
	}
	fclose(file_o);
	file_o = fopen(file_p, "wb");
	if (!file_o) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	fprintf(file_o, "0\n");
	ret = 0;
close:
	if (ret)
		DRV_LOG(DEBUG, "Failed to disable ROCE by sysfs: %d.", ret);
	else
		DRV_LOG(INFO, "ROCE is disabled by sysfs successfully.");
	fclose(file_o);
	return ret;
}

static int
mlx5_vdpa_roce_disable(struct rte_pci_addr *addr, struct ibv_device **ibv)
{
	char addr_name[64] = {0};

	rte_pci_device_name(addr, addr_name, sizeof(addr_name));
	/* First try to disable ROCE by Netlink, then fall back to sysfs. */
	if (mlx5_vdpa_nl_roce_disable(addr_name) == 0 ||
	    mlx5_vdpa_sys_roce_disable(addr_name) == 0) {
		/*
		 * Succeeded to disable ROCE, wait for the IB device to appear
		 * again after reload.
		 */
		int r;
		struct ibv_device *ibv_new;

		for (r = MLX5_VDPA_MAX_RETRIES; r; r--) {
			ibv_new = mlx5_vdpa_get_ib_device_match(addr);
			if (ibv_new) {
				*ibv = ibv_new;
				return 0;
			}
			usleep(MLX5_VDPA_USEC);
		}
		DRV_LOG(ERR, "Cannot match device %s after ROCE disable, "
			"retries exceed %d", addr_name, MLX5_VDPA_MAX_RETRIES);
		rte_errno = EAGAIN;
	}
	return -rte_errno;
}

static int
mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_vdpa_priv *priv = opaque;
	unsigned long tmp;
	int n_cores = sysconf(_SC_NPROCESSORS_ONLN);

	if (strcmp(key, "class") == 0)
		return 0;
	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -errno;
	}
	if (strcmp(key, "event_mode") == 0) {
		if (tmp <= MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT)
			priv->event_mode = (int)tmp;
		else
			DRV_LOG(WARNING, "Invalid event_mode %s.", val);
	} else if (strcmp(key, "event_us") == 0) {
		priv->event_us = (uint32_t)tmp;
	} else if (strcmp(key, "no_traffic_time") == 0) {
		priv->no_traffic_time_s = (uint32_t)tmp;
	} else if (strcmp(key, "event_core") == 0) {
		if (tmp >= (unsigned long)n_cores)
			DRV_LOG(WARNING, "Invalid event_core %s.", val);
		else
			priv->event_core = tmp;
	} else if (strcmp(key, "hw_latency_mode") == 0) {
		priv->hw_latency_mode = (uint32_t)tmp;
	} else if (strcmp(key, "hw_max_latency_us") == 0) {
		priv->hw_max_latency_us = (uint32_t)tmp;
	} else if (strcmp(key, "hw_max_pending_comp") == 0) {
		priv->hw_max_pending_comp = (uint32_t)tmp;
	} else {
		DRV_LOG(WARNING, "Invalid key %s.", key);
	}
	return 0;
}

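/* Apply the driver defaults and override them from the device devargs, if any. */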
static void
mlx5_vdpa_config_get(struct rte_devargs *devargs, struct mlx5_vdpa_priv *priv)
{
	struct rte_kvargs *kvlist;

	priv->event_mode = MLX5_VDPA_EVENT_MODE_FIXED_TIMER;
	priv->event_us = 0;
	priv->event_core = -1;
	priv->no_traffic_time_s = MLX5_VDPA_DEFAULT_NO_TRAFFIC_TIME_S;
	if (devargs == NULL)
		return;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;
	rte_kvargs_process(kvlist, NULL, mlx5_vdpa_args_check_handler, priv);
	rte_kvargs_free(kvlist);
	if (!priv->event_us &&
	    priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER)
		priv->event_us = MLX5_VDPA_DEFAULT_TIMER_STEP_US;
	DRV_LOG(DEBUG, "event mode is %d.", priv->event_mode);
	DRV_LOG(DEBUG, "event_us is %u us.", priv->event_us);
	DRV_LOG(DEBUG, "no traffic time is %u s.", priv->no_traffic_time_s);
}

/**
 * DPDK callback to register a mlx5 PCI device.
 *
 * This function spawns a vdpa device out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_vdpa_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, 1 to skip this driver, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev __rte_unused)
{
	struct ibv_device *ibv;
	struct mlx5_vdpa_priv *priv = NULL;
	struct ibv_context *ctx = NULL;
	struct mlx5_hca_attr attr;
	int ret;

	ibv = mlx5_vdpa_get_ib_device_match(&pci_dev->addr);
	if (!ibv) {
		DRV_LOG(ERR, "No matching IB device for PCI slot "
			PCI_PRI_FMT ".", pci_dev->addr.domain,
			pci_dev->addr.bus, pci_dev->addr.devid,
			pci_dev->addr.function);
		return -rte_errno;
	} else {
		DRV_LOG(INFO, "PCI information matches for device \"%s\".",
			ibv->name);
	}
	if (mlx5_vdpa_roce_disable(&pci_dev->addr, &ibv) != 0) {
		DRV_LOG(WARNING, "Failed to disable ROCE for \"%s\".",
			ibv->name);
		return -rte_errno;
	}
	ctx = mlx5_glue->dv_open_device(ibv);
	if (!ctx) {
		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
	if (ret) {
		DRV_LOG(ERR, "Unable to read HCA capabilities.");
		rte_errno = ENOTSUP;
		goto error;
	} else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
		DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
			"old FW/OFED version?");
		rte_errno = ENOTSUP;
		goto error;
	}
	if (!attr.vdpa.queue_counters_valid)
		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
			   sizeof(struct mlx5_vdpa_virtq) *
			   attr.vdpa.max_num_virtio_queues * 2,
			   RTE_CACHE_LINE_SIZE);
	if (!priv) {
		DRV_LOG(ERR, "Failed to allocate private memory.");
		rte_errno = ENOMEM;
		goto error;
	}
	priv->caps = attr.vdpa;
	priv->log_max_rqt_size = attr.log_max_rqt_size;
	priv->num_lag_ports = attr.num_lag_ports;
	if (attr.num_lag_ports == 0)
		priv->num_lag_ports = 1;
	priv->ctx = ctx;
	priv->pci_dev = pci_dev;
	priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
	if (!priv->var) {
		DRV_LOG(ERR, "Failed to allocate VAR %u.", errno);
		goto error;
	}
	priv->vdev = rte_vdpa_register_device(&pci_dev->device,
			&mlx5_vdpa_ops);
	if (priv->vdev == NULL) {
		DRV_LOG(ERR, "Failed to register vDPA device.");
		rte_errno = rte_errno ? rte_errno : EINVAL;
		goto error;
	}
	mlx5_vdpa_config_get(pci_dev->device.devargs, priv);
	SLIST_INIT(&priv->mr_list);
	pthread_mutex_init(&priv->vq_config_lock, NULL);
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;

error:
	if (priv) {
		if (priv->var)
			mlx5_glue->dv_free_var(priv->var);
		rte_free(priv);
	}
	if (ctx)
		mlx5_glue->close_device(ctx);
	return -rte_errno;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all vDPA devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
{
	struct mlx5_vdpa_priv *priv = NULL;
	int found = 0;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &priv_list, next) {
		if (!rte_pci_addr_cmp(&priv->pci_dev->addr, &pci_dev->addr)) {
			found = 1;
			break;
		}
	}
	if (found)
		TAILQ_REMOVE(&priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (found) {
		if (priv->configured)
			mlx5_vdpa_dev_close(priv->vid);
		if (priv->var) {
			mlx5_glue->dv_free_var(priv->var);
			priv->var = NULL;
		}
		mlx5_glue->close_device(priv->ctx);
		pthread_mutex_destroy(&priv->vq_config_lock);
		rte_free(priv);
	}
	return 0;
}

static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX7)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_pci_driver mlx5_vdpa_driver = {
	.driver_class = MLX5_CLASS_VDPA,
	.pci_driver = {
		.driver = {
			.name = "mlx5_vdpa",
		},
		.id_table = mlx5_vdpa_pci_id_map,
		.probe = mlx5_vdpa_pci_probe,
		.remove = mlx5_vdpa_pci_remove,
		.drv_flags = 0,
	},
};

RTE_LOG_REGISTER(mlx5_vdpa_logtype, pmd.vdpa.mlx5, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_vdpa_init)
{
	mlx5_common_init();
	if (mlx5_glue)
		mlx5_pci_driver_register(&mlx5_vdpa_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5_vdpa, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5_vdpa, mlx5_vdpa_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5_vdpa, "* ib_uverbs & mlx5_core & mlx5_ib");