/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <netinet/in.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_bus_pci.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_defs.h>
#include <mlx5_devx_cmds.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

#define MLX5_VDPA_DRIVER_NAME vdpa_mlx5

#define MLX5_VDPA_DEFAULT_FEATURES ((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
			    (1ULL << VIRTIO_F_ANY_LAYOUT) | \
			    (1ULL << VIRTIO_NET_F_MQ) | \
			    (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
			    (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
			    (1ULL << VHOST_F_LOG_ALL) | \
			    (1ULL << VIRTIO_NET_F_MTU))

#define MLX5_VDPA_PROTOCOL_FEATURES \
			    ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU) | \
			     (1ULL << VHOST_USER_PROTOCOL_F_STATUS))

#define MLX5_VDPA_DEFAULT_NO_TRAFFIC_MAX 16LLU

TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
					  TAILQ_HEAD_INITIALIZER(priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
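
/*
 * Manager of the configuration threads enabled by the "max_conf_threads"
 * devarg. It is global to the PMD: the first probed device initializes it
 * and all devices share its thread pool.
 */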
struct mlx5_vdpa_conf_thread_mng conf_thread_mng;

static void mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv);
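
/*
 * Find the driver private structure owning the given vDPA device.
 * The priv_list walk is protected by priv_list_lock; NULL is returned
 * when the device was not probed by this driver.
 */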
static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
{
	struct mlx5_vdpa_priv *priv;
	int found = 0;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &priv_list, next) {
		if (vdev == priv->vdev) {
			found = 1;
			break;
		}
	}
	pthread_mutex_unlock(&priv_list_lock);
	if (!found) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		rte_errno = ENODEV;
		return NULL;
	}
	return priv;
}
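
/*
 * vhost reports the number of queue pairs while the HCA capability counts
 * individual virtqs, hence the division by two.
 */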
static int
mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -1;
	}
	*queue_num = priv->caps.max_num_virtio_queues / 2;
	return 0;
}

static int
mlx5_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -1;
	}
	*features = MLX5_VDPA_DEFAULT_FEATURES;
	if (priv->caps.virtio_queue_type & (1 << MLX5_VIRTQ_TYPE_PACKED))
		*features |= (1ULL << VIRTIO_F_RING_PACKED);
	if (priv->caps.tso_ipv4)
		*features |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
	if (priv->caps.tso_ipv6)
		*features |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
	if (priv->caps.tx_csum)
		*features |= (1ULL << VIRTIO_NET_F_CSUM);
	if (priv->caps.rx_csum)
		*features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
	if (priv->caps.virtio_version_1_0)
		*features |= (1ULL << VIRTIO_F_VERSION_1);
	return 0;
}

static int
mlx5_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
		uint64_t *features)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -1;
	}
	*features = MLX5_VDPA_PROTOCOL_FEATURES;
	return 0;
}
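
/* Enable or disable a single virtq, serialized by the per-virtq lock. */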
static int
mlx5_vdpa_set_vring_state(int vid, int vring, int state)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	struct mlx5_vdpa_virtq *virtq;
	int ret;

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	if (vring >= (int)priv->caps.max_num_virtio_queues) {
		DRV_LOG(ERR, "Too big vring id: %d.", vring);
		return -E2BIG;
	}
	virtq = &priv->virtqs[vring];
	pthread_mutex_lock(&virtq->virtq_lock);
	ret = mlx5_vdpa_virtq_enable(priv, vring, state);
	pthread_mutex_unlock(&virtq->virtq_lock);
	return ret;
}

static int
mlx5_vdpa_features_set(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	uint64_t log_base, log_size;
	uint64_t features;
	int ret;

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	ret = rte_vhost_get_negotiated_features(vid, &features);
	if (ret) {
		DRV_LOG(ERR, "Failed to get negotiated features.");
		return ret;
	}
	if (RTE_VHOST_NEED_LOG(features)) {
		ret = rte_vhost_get_log_base(vid, &log_base, &log_size);
		if (ret) {
			DRV_LOG(ERR, "Failed to get log base.");
			return ret;
		}
		ret = mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size);
		if (ret) {
			DRV_LOG(ERR, "Failed to set dirty bitmap.");
			return ret;
		}
		DRV_LOG(INFO, "mlx5 vdpa: enabling dirty logging...");
		ret = mlx5_vdpa_logging_enable(priv, 1);
		if (ret) {
			DRV_LOG(ERR, "Failed to enable dirty logging.");
			return ret;
		}
	}
	return 0;
}
static int
mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)
{
	struct ifreq request;
	uint16_t vhost_mtu = 0;
	uint16_t kern_mtu = 0;
	int ret = rte_vhost_get_mtu(priv->vid, &vhost_mtu);
	int sock;
	int retries = MLX5_VDPA_MAX_RETRIES;

	if (ret) {
		DRV_LOG(DEBUG, "Cannot get vhost MTU - %d.", ret);
		return ret;
	}
	if (!vhost_mtu) {
		DRV_LOG(DEBUG, "Vhost MTU is 0.");
		return ret;
	}
	ret = mlx5_get_ifname_sysfs
				(mlx5_os_get_ctx_device_name(priv->cdev->ctx),
				 request.ifr_name);
	if (ret) {
		DRV_LOG(DEBUG, "Cannot get kernel IF name - %d.", ret);
		return ret;
	}
	sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	if (sock == -1) {
		DRV_LOG(DEBUG, "Cannot open IF socket.");
		return sock;
	}
	while (retries--) {
		ret = ioctl(sock, SIOCGIFMTU, &request);
		if (ret == -1)
			break;
		kern_mtu = request.ifr_mtu;
		DRV_LOG(DEBUG, "MTU: current %d requested %d.", (int)kern_mtu,
			(int)vhost_mtu);
		if (kern_mtu == vhost_mtu)
			break;
		request.ifr_mtu = vhost_mtu;
		ret = ioctl(sock, SIOCSIFMTU, &request);
		if (ret == -1)
			break;
		usleep(MLX5_VDPA_USEC);
	}
	close(sock);
	return kern_mtu == vhost_mtu ? 0 : -1;
}

static void
mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv)
{
	/* Clean pre-created resource in dev removal only. */
	mlx5_vdpa_virtqs_cleanup(priv);
	mlx5_vdpa_mem_dereg(priv);
}
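
/* Wait, with a 10 second timeout, until all device close tasks are done. */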
static bool
mlx5_vdpa_wait_dev_close_tasks_done(struct mlx5_vdpa_priv *priv)
{
	uint32_t timeout = 0;

	/* Check and wait all close tasks done. */
	while (__atomic_load_n(&priv->dev_close_progress,
		__ATOMIC_RELAXED) != 0 && timeout < 1000) {
		rte_delay_us_sleep(10000);
		timeout++;
	}
	if (priv->dev_close_progress) {
		DRV_LOG(ERR,
			"Failed to wait close device tasks done vid %d.",
			priv->vid);
		return true;
	}
	return false;
}
static int
_internal_mlx5_vdpa_dev_close(struct mlx5_vdpa_priv *priv,
		bool release_resource)
{
	int ret = 0;
	int vid = priv->vid;

	mlx5_vdpa_cqe_event_unset(priv);
	if (priv->state == MLX5_VDPA_STATE_CONFIGURED) {
		ret |= mlx5_vdpa_lm_log(priv);
		priv->state = MLX5_VDPA_STATE_IN_PROGRESS;
	}
	if (priv->use_c_thread && !release_resource) {
		if (priv->last_c_thrd_idx >=
			(conf_thread_mng.max_thrds - 1))
			priv->last_c_thrd_idx = 0;
		else
			priv->last_c_thrd_idx++;
		__atomic_store_n(&priv->dev_close_progress,
			1, __ATOMIC_RELAXED);
		if (mlx5_vdpa_task_add(priv,
			priv->last_c_thrd_idx,
			MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
			NULL, NULL, NULL, 1)) {
			DRV_LOG(ERR,
				"Failed to add dev close task.");
			goto single_thrd;
		}
		priv->state = MLX5_VDPA_STATE_PROBED;
		DRV_LOG(INFO, "vDPA device %d was closed.", vid);
		return ret;
	}
single_thrd:
	pthread_mutex_lock(&priv->steer_update_lock);
	mlx5_vdpa_steer_unset(priv);
	pthread_mutex_unlock(&priv->steer_update_lock);
	mlx5_vdpa_virtqs_release(priv, release_resource);
	mlx5_vdpa_drain_cq(priv);
	if (priv->lm_mr.addr)
		mlx5_os_wrapped_mkey_destroy(&priv->lm_mr);
	if (!priv->connected)
		mlx5_vdpa_dev_cache_clean(priv);
	__atomic_store_n(&priv->dev_close_progress, 0,
		__ATOMIC_RELAXED);
	priv->state = MLX5_VDPA_STATE_PROBED;
	DRV_LOG(INFO, "vDPA device %d was closed.", vid);
	return ret;
}

static int
mlx5_vdpa_dev_close(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv;

	if (!vdev) {
		DRV_LOG(ERR, "Invalid vDPA device.");
		return -1;
	}
	priv = mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -1;
	}
	return _internal_mlx5_vdpa_dev_close(priv, false);
}
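
/*
 * vhost dev_conf callback: close any previous configuration, then set up
 * memory registration, virtqs, steering and CQE events for the new vid.
 */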
static int
mlx5_vdpa_dev_config(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	if (priv->state == MLX5_VDPA_STATE_CONFIGURED &&
	    mlx5_vdpa_dev_close(vid)) {
		DRV_LOG(ERR, "Failed to reconfigure vid %d.", vid);
		return -1;
	}
	if (mlx5_vdpa_wait_dev_close_tasks_done(priv))
		return -1;
	priv->vid = vid;
	priv->connected = true;
	if (mlx5_vdpa_mtu_set(priv))
		DRV_LOG(WARNING, "MTU cannot be set on device %s.",
				vdev->device->name);
	if (mlx5_vdpa_mem_register(priv) ||
	    mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
	    mlx5_vdpa_cqe_event_setup(priv)) {
		mlx5_vdpa_dev_close(vid);
		return -1;
	}
	priv->state = MLX5_VDPA_STATE_CONFIGURED;
	DRV_LOG(INFO, "vDPA device %d was configured.", vid);
	return 0;
}

static int
mlx5_vdpa_get_device_fd(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	return ((struct ibv_context *)priv->cdev->ctx)->cmd_fd;
}
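
/* Expose the VAR doorbell page so vhost can mmap it for queue notification. */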
static int
mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	RTE_SET_USED(qid);
	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -EINVAL;
	}
	if (!priv->var) {
		DRV_LOG(ERR, "VAR was not created for device %s, is the device"
			" configured?", vdev->device->name);
		return -EINVAL;
	}
	*offset = priv->var->mmap_off;
	*size = priv->var->length;
	return 0;
}

static int
mlx5_vdpa_get_stats_names(struct rte_vdpa_device *vdev,
		struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	static const char *mlx5_vdpa_stats_names[MLX5_VDPA_STATS_MAX] = {
		"received_descriptors",
		"completed_descriptors",
		"bad descriptor errors",
		"exceed max chain",
		"invalid buffer",
		"completion errors",
	};
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	unsigned int i;

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
		return -ENODEV;
	}
	if (!stats_names)
		return MLX5_VDPA_STATS_MAX;
	size = RTE_MIN(size, (unsigned int)MLX5_VDPA_STATS_MAX);
	for (i = 0; i < size; ++i)
		strlcpy(stats_names[i].name, mlx5_vdpa_stats_names[i],
			RTE_VDPA_STATS_NAME_SIZE);
	return size;
}

static int
mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
		struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
		return -ENODEV;
	}
	if (qid >= (int)priv->caps.max_num_virtio_queues) {
		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
			vdev->device->name);
		return -E2BIG;
	}
	if (!priv->caps.queue_counters_valid) {
		DRV_LOG(ERR, "Virtq statistics are not supported for device %s.",
			vdev->device->name);
		return -ENOTSUP;
	}
	return mlx5_vdpa_virtq_stats_get(priv, qid, stats, n);
}

static int
mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
{
	struct mlx5_vdpa_priv *priv =
		mlx5_vdpa_find_priv_resource_by_vdev(vdev);

	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
		return -ENODEV;
	}
	if (qid >= (int)priv->caps.max_num_virtio_queues) {
		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
			vdev->device->name);
		return -E2BIG;
	}
	if (!priv->caps.queue_counters_valid) {
		DRV_LOG(ERR, "Virtq statistics are not supported for device %s.",
			vdev->device->name);
		return -ENOTSUP;
	}
	return mlx5_vdpa_virtq_stats_reset(priv, qid);
}

static int
mlx5_vdpa_dev_cleanup(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct mlx5_vdpa_priv *priv;

	if (vdev == NULL)
		return -1;
	priv = mlx5_vdpa_find_priv_resource_by_vdev(vdev);
	if (priv == NULL) {
		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
		return -1;
	}
	if (priv->state == MLX5_VDPA_STATE_PROBED) {
		if (priv->use_c_thread)
			mlx5_vdpa_wait_dev_close_tasks_done(priv);
		mlx5_vdpa_dev_cache_clean(priv);
	}
	priv->connected = false;
	return 0;
}
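
/* vDPA driver callbacks registered with the vhost library. */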
static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
	.get_queue_num = mlx5_vdpa_get_queue_num,
	.get_features = mlx5_vdpa_get_vdpa_features,
	.get_protocol_features = mlx5_vdpa_get_protocol_features,
	.dev_conf = mlx5_vdpa_dev_config,
	.dev_close = mlx5_vdpa_dev_close,
	.dev_cleanup = mlx5_vdpa_dev_cleanup,
	.set_vring_state = mlx5_vdpa_set_vring_state,
	.set_features = mlx5_vdpa_features_set,
	.migration_done = NULL,
	.get_vfio_group_fd = NULL,
	.get_vfio_device_fd = mlx5_vdpa_get_device_fd,
	.get_notify_area = mlx5_vdpa_get_notify_area,
	.get_stats_names = mlx5_vdpa_get_stats_names,
	.get_stats = mlx5_vdpa_get_stats,
	.reset_stats = mlx5_vdpa_reset_stats,
};
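
/* Parse a single "key=value" devarg into the device private structure. */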
static int
mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_vdpa_priv *priv = opaque;
	unsigned long tmp;
	int n_cores = sysconf(_SC_NPROCESSORS_ONLN);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -errno;
	}
	if (strcmp(key, "event_mode") == 0) {
		if (tmp <= MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT)
			priv->event_mode = (int)tmp;
		else
			DRV_LOG(WARNING, "Invalid event_mode %s.", val);
	} else if (strcmp(key, "event_us") == 0) {
		priv->event_us = (uint32_t)tmp;
	} else if (strcmp(key, "no_traffic_time") == 0) {
		priv->no_traffic_max = (uint32_t)tmp;
	} else if (strcmp(key, "event_core") == 0) {
		if (tmp >= (unsigned long)n_cores)
			DRV_LOG(WARNING, "Invalid event_core %s.", val);
		else
			priv->event_core = tmp;
	} else if (strcmp(key, "max_conf_threads") == 0) {
		if (tmp) {
			priv->use_c_thread = true;
			if (!conf_thread_mng.initializer_priv) {
				conf_thread_mng.initializer_priv = priv;
				if (tmp > MLX5_VDPA_MAX_C_THRD) {
					DRV_LOG(WARNING,
						"Invalid max_conf_threads %s, "
						"setting max_conf_threads to %d.",
						val, MLX5_VDPA_MAX_C_THRD);
					tmp = MLX5_VDPA_MAX_C_THRD;
				}
				conf_thread_mng.max_thrds = tmp;
			} else if (tmp != conf_thread_mng.max_thrds) {
				DRV_LOG(WARNING,
					"max_conf_threads is a PMD-global argument, not per device; "
					"only the first device configuration sets it. The current value is %d "
					"and will not be changed to %d.",
					conf_thread_mng.max_thrds, (int)tmp);
			}
		} else {
			priv->use_c_thread = false;
		}
	} else if (strcmp(key, "hw_latency_mode") == 0) {
		priv->hw_latency_mode = (uint32_t)tmp;
	} else if (strcmp(key, "hw_max_latency_us") == 0) {
		priv->hw_max_latency_us = (uint32_t)tmp;
	} else if (strcmp(key, "hw_max_pending_comp") == 0) {
		priv->hw_max_pending_comp = (uint32_t)tmp;
	} else if (strcmp(key, "queue_size") == 0) {
		priv->queue_size = (uint16_t)tmp;
	} else if (strcmp(key, "queues") == 0) {
		priv->queues = (uint16_t)tmp;
	} else {
		DRV_LOG(WARNING, "Invalid key %s.", key);
	}
	return 0;
}
static void
mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,
		     struct mlx5_vdpa_priv *priv)
{
	const char **params = (const char *[]){
		"event_core",
		"event_mode",
		"event_us",
		"hw_latency_mode",
		"hw_max_latency_us",
		"hw_max_pending_comp",
		"no_traffic_time",
		"queue_size",
		"queues",
		"max_conf_threads",
		NULL,
	};

	priv->event_mode = MLX5_VDPA_EVENT_MODE_FIXED_TIMER;
	priv->event_us = 0;
	priv->event_core = -1;
	priv->no_traffic_max = MLX5_VDPA_DEFAULT_NO_TRAFFIC_MAX;
	if (mkvlist == NULL)
		return;
	mlx5_kvargs_process(mkvlist, params, mlx5_vdpa_args_check_handler,
			    priv);
	if (!priv->event_us &&
	    priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER)
		priv->event_us = MLX5_VDPA_DEFAULT_TIMER_STEP_US;
	if ((priv->queue_size && !priv->queues) ||
	    (!priv->queue_size && priv->queues)) {
		priv->queue_size = 0;
		priv->queues = 0;
		DRV_LOG(WARNING, "Please provide both queue_size and queues.");
	}
	DRV_LOG(DEBUG, "event mode is %d.", priv->event_mode);
	DRV_LOG(DEBUG, "event_us is %u us.", priv->event_us);
	DRV_LOG(DEBUG, "no traffic max is %u.", priv->no_traffic_max);
	DRV_LOG(DEBUG, "queues is %u, queue_size is %u.", priv->queues,
		priv->queue_size);
}

static void
mlx5_vdpa_prepare_virtq_destroy(struct mlx5_vdpa_priv *priv)
{
	uint32_t max_queues, index;
	struct mlx5_vdpa_virtq *virtq;

	if (!priv->queues || !priv->queue_size)
		return;
	max_queues = ((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?
		(priv->queues * 2) : (priv->caps.max_num_virtio_queues);
	if (mlx5_vdpa_is_modify_virtq_supported(priv))
		mlx5_vdpa_steer_unset(priv);
	for (index = 0; index < max_queues; ++index) {
		virtq = &priv->virtqs[index];
		if (virtq->virtq) {
			pthread_mutex_lock(&virtq->virtq_lock);
			mlx5_vdpa_virtq_unset(virtq);
			pthread_mutex_unlock(&virtq->virtq_lock);
		}
	}
}
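
/*
 * Pre-create virtq resources, spreading the preparation over the
 * configuration threads when they are enabled; indexes that fall to the
 * main thread are prepared synchronously after the tasks are queued.
 */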
static int
mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
{
	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
	uint32_t max_queues, index, thrd_idx, data[1];
	struct mlx5_vdpa_virtq *virtq;

	for (index = 0; index < priv->caps.max_num_virtio_queues;
		index++) {
		virtq = &priv->virtqs[index];
		pthread_mutex_init(&virtq->virtq_lock, NULL);
	}
	if (!priv->queues || !priv->queue_size)
		return 0;
	max_queues = ((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?
		(priv->queues * 2) : (priv->caps.max_num_virtio_queues);
	if (priv->use_c_thread) {
		uint32_t main_task_idx[max_queues];

		for (index = 0; index < max_queues; ++index) {
			thrd_idx = index % (conf_thread_mng.max_thrds + 1);
			if (!thrd_idx) {
				main_task_idx[task_num] = index;
				task_num++;
				continue;
			}
			thrd_idx = priv->last_c_thrd_idx + 1;
			if (thrd_idx >= conf_thread_mng.max_thrds)
				thrd_idx = 0;
			priv->last_c_thrd_idx = thrd_idx;
			data[0] = index;
			if (mlx5_vdpa_task_add(priv, thrd_idx,
				MLX5_VDPA_TASK_PREPARE_VIRTQ,
				&remaining_cnt, &err_cnt,
				(void **)&data, 1)) {
				DRV_LOG(ERR, "Failed to add "
					"task prepare virtq (%d).", index);
				main_task_idx[task_num] = index;
				task_num++;
			}
		}
		for (index = 0; index < task_num; ++index)
			if (mlx5_vdpa_virtq_single_resource_prepare(priv,
				main_task_idx[index]))
				goto error;
		if (mlx5_vdpa_c_thread_wait_bulk_tasks_done(&remaining_cnt,
			&err_cnt, 2000)) {
			DRV_LOG(ERR,
				"Failed to wait virt-queue prepare tasks ready.");
			goto error;
		}
	} else {
		for (index = 0; index < max_queues; ++index)
			if (mlx5_vdpa_virtq_single_resource_prepare(priv,
				index))
				goto error;
	}
	if (mlx5_vdpa_is_modify_virtq_supported(priv))
		if (mlx5_vdpa_steer_update(priv, true))
			goto error;
	return 0;
error:
	mlx5_vdpa_prepare_virtq_destroy(priv);
	return -1;
}
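
/*
 * Allocate the device resources: the VAR (retried, since QEMU may still
 * hold it across a vDPA restart), the doorbell mapping, the transport
 * domain, one TIS per LAG port, a null MR and the Rx steering objects.
 */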
static int
mlx5_vdpa_create_dev_resources(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_devx_tis_attr tis_attr = {0};
	struct ibv_context *ctx = priv->cdev->ctx;
	uint32_t i;
	int retry;

	for (retry = 0; retry < 7; retry++) {
		priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
		if (priv->var != NULL)
			break;
		DRV_LOG(WARNING, "Failed to allocate VAR, retry %d.", retry);
		/* Wait for QEMU to release the VAR during vdpa restart, 0.1 sec based. */
		usleep(100000U << retry);
	}
	if (!priv->var) {
		DRV_LOG(ERR, "Failed to allocate VAR %u.", errno);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Always map the entire page. */
	priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
				   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,
				   priv->var->mmap_off);
	if (priv->virtq_db_addr == MAP_FAILED) {
		DRV_LOG(ERR, "Failed to map doorbell page %u.", errno);
		priv->virtq_db_addr = NULL;
		rte_errno = errno;
		return -rte_errno;
	}
	/* Add within page offset for 64K page system. */
	priv->virtq_db_addr = (char *)priv->virtq_db_addr +
		((rte_mem_page_size() - 1) & priv->caps.doorbell_bar_offset);
	DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
		priv->virtq_db_addr);
	priv->td = mlx5_devx_cmd_create_td(ctx);
	if (!priv->td) {
		DRV_LOG(ERR, "Failed to create transport domain.");
		rte_errno = errno;
		return -rte_errno;
	}
	tis_attr.transport_domain = priv->td->id;
	for (i = 0; i < priv->num_lag_ports; i++) {
		/* 0 is auto affinity, non-zero value to propose port. */
		tis_attr.lag_tx_port_affinity = i + 1;
		priv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
		if (!priv->tiss[i]) {
			DRV_LOG(ERR, "Failed to create TIS %u.", i);
			return -rte_errno;
		}
	}
	priv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);
	if (!priv->null_mr) {
		DRV_LOG(ERR, "Failed to allocate null MR.");
		rte_errno = errno;
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "Dump fill Mkey = %u.", priv->null_mr->lkey);
#ifdef HAVE_MLX5DV_DR
	priv->steer.domain = mlx5_glue->dr_create_domain(ctx,
					MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	if (!priv->steer.domain) {
		DRV_LOG(ERR, "Failed to create Rx domain.");
		rte_errno = errno;
		return -rte_errno;
	}
#endif
	priv->steer.tbl = mlx5_glue->dr_create_flow_tbl(priv->steer.domain, 0);
	if (!priv->steer.tbl) {
		DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
		rte_errno = errno;
		return -rte_errno;
	}
	if (mlx5_vdpa_err_event_setup(priv) != 0)
		return -rte_errno;
	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -rte_errno;
	if (mlx5_vdpa_virtq_resource_prepare(priv))
		return -rte_errno;
	return 0;
}
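
/* Probe callback: validate HCA vDPA capabilities, allocate and register. */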
static int
mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
		    struct mlx5_kvargs_ctrl *mkvlist)
{
	struct mlx5_vdpa_priv *priv = NULL;
	struct mlx5_hca_attr *attr = &cdev->config.hca_attr;

	if (!attr->vdpa.valid || !attr->vdpa.max_num_virtio_queues) {
		DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
			"old FW/OFED version?");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	if (!attr->vdpa.queue_counters_valid)
		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
			   sizeof(struct mlx5_vdpa_virtq) *
			   attr->vdpa.max_num_virtio_queues,
			   RTE_CACHE_LINE_SIZE);
	if (!priv) {
		DRV_LOG(ERR, "Failed to allocate private memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->caps = attr->vdpa;
	priv->log_max_rqt_size = attr->log_max_rqt_size;
	priv->num_lag_ports = attr->num_lag_ports;
	if (attr->num_lag_ports == 0)
		priv->num_lag_ports = 1;
	rte_spinlock_init(&priv->db_lock);
	pthread_mutex_init(&priv->steer_update_lock, NULL);
	priv->cdev = cdev;
	mlx5_vdpa_config_get(mkvlist, priv);
	if (priv->use_c_thread) {
		if (conf_thread_mng.initializer_priv == priv)
			if (mlx5_vdpa_mult_threads_create(priv->event_core))
				goto error;
		__atomic_fetch_add(&conf_thread_mng.refcnt, 1,
			__ATOMIC_RELAXED);
	}
	if (mlx5_vdpa_create_dev_resources(priv))
		goto error;
	priv->vdev = rte_vdpa_register_device(cdev->dev, &mlx5_vdpa_ops);
	if (priv->vdev == NULL) {
		DRV_LOG(ERR, "Failed to register vDPA device.");
		rte_errno = rte_errno ? rte_errno : EINVAL;
		goto error;
	}
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;
error:
	if (conf_thread_mng.initializer_priv == priv)
		mlx5_vdpa_mult_threads_destroy(false);
	if (priv)
		mlx5_vdpa_dev_release(priv);
	return -rte_errno;
}

static int
mlx5_vdpa_dev_remove(struct mlx5_common_device *cdev)
{
	struct mlx5_vdpa_priv *priv = NULL;
	int found = 0;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &priv_list, next) {
		if (priv->vdev->device == cdev->dev) {
			found = 1;
			break;
		}
	}
	if (found)
		TAILQ_REMOVE(&priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (found)
		mlx5_vdpa_dev_release(priv);
	return 0;
}

static void
mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
{
	uint32_t i;

	mlx5_vdpa_virtqs_cleanup(priv);
	mlx5_vdpa_dev_cache_clean(priv);
	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
		if (!priv->virtqs[i].counters)
			continue;
		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
	}
	mlx5_vdpa_event_qp_global_release(priv);
	mlx5_vdpa_err_event_unset(priv);
	if (priv->steer.tbl)
		claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));
	if (priv->steer.domain)
		claim_zero(mlx5_glue->dr_destroy_domain(priv->steer.domain));
	if (priv->null_mr)
		claim_zero(mlx5_glue->dereg_mr(priv->null_mr));
	for (i = 0; i < priv->num_lag_ports; i++) {
		if (priv->tiss[i])
			claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
	}
	if (priv->td)
		claim_zero(mlx5_devx_cmd_destroy(priv->td));
	if (priv->virtq_db_addr)
		/* Mask out the within page offset for munmap. */
		claim_zero(munmap((void *)((uintptr_t)priv->virtq_db_addr &
			~(rte_mem_page_size() - 1)), priv->var->length));
	if (priv->var)
		mlx5_glue->dv_free_var(priv->var);
}
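
/* Final teardown: close, unregister and free the device private structure. */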
static void
mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->state == MLX5_VDPA_STATE_CONFIGURED)
		_internal_mlx5_vdpa_dev_close(priv, true);
	if (priv->use_c_thread)
		mlx5_vdpa_wait_dev_close_tasks_done(priv);
	mlx5_vdpa_release_dev_resources(priv);
	if (priv->vdev)
		rte_vdpa_unregister_device(priv->vdev);
	if (priv->use_c_thread)
		if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
			1, __ATOMIC_RELAXED) == 1)
			mlx5_vdpa_mult_threads_destroy(true);
	rte_free(priv);
}

static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX7)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_class_driver mlx5_vdpa_driver = {
	.drv_class = MLX5_CLASS_VDPA,
	.name = RTE_STR(MLX5_VDPA_DRIVER_NAME),
	.id_table = mlx5_vdpa_pci_id_map,
	.probe = mlx5_vdpa_dev_probe,
	.remove = mlx5_vdpa_dev_remove,
};

RTE_LOG_REGISTER_DEFAULT(mlx5_vdpa_logtype, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_vdpa_init)
{
	mlx5_common_init();
	if (mlx5_glue)
		mlx5_class_driver_register(&mlx5_vdpa_driver);
}

RTE_PMD_EXPORT_NAME(MLX5_VDPA_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_VDPA_DRIVER_NAME, mlx5_vdpa_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_VDPA_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");