1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
6 #include <sys/eventfd.h>
8 #include <rte_malloc.h>
11 #include <rte_alarm.h>
12 #include <rte_tailq.h>
13 #include <rte_ring_elem.h>
14 #include <rte_ring_peek.h>
16 #include <mlx5_common.h>
18 #include "mlx5_vdpa_utils.h"
19 #include "mlx5_vdpa.h"
/*
 * Dequeue a burst of configuration tasks from ring @r.
 *
 * Thin wrapper over the rte_ring two-phase ("peek") dequeue API for
 * elements of sizeof(struct mlx5_vdpa_task). @obj receives the dequeued
 * elements; @avail, if non-NULL, receives the number of entries still
 * available in the ring after the reservation.
 *
 * NOTE(review): interior lines are elided in this view (the declaration
 * of `m`, any adjustment of `n` between start and finish, and the return
 * statement are not visible) — confirm against the full file.
 */
21 static inline uint32_t
22 mlx5_vdpa_c_thrd_ring_dequeue_bulk(struct rte_ring *r,
23 		void **obj, uint32_t n, uint32_t *avail)
/* Phase 1: reserve up to @n task-sized elements; m is the count obtained. */
27 	m = rte_ring_dequeue_bulk_elem_start(r, obj,
28 		sizeof(struct mlx5_vdpa_task), n, avail);
/* Phase 2: commit the dequeue so the consumer head advances. */
30 	rte_ring_dequeue_elem_finish(r, n);
/*
 * Enqueue a burst of configuration tasks onto ring @r.
 *
 * Mirror of the dequeue helper above: two-phase ("peek") enqueue of
 * sizeof(struct mlx5_vdpa_task)-sized elements. @free, if non-NULL,
 * receives the free space remaining in the ring after the reservation.
 *
 * NOTE(review): interior lines are elided (declaration of `m`, any
 * adjustment of `n` when fewer than @n slots were reserved, and the
 * return statement) — confirm against the full file.
 */
34 static inline uint32_t
35 mlx5_vdpa_c_thrd_ring_enqueue_bulk(struct rte_ring *r,
36 		void * const *obj, uint32_t n, uint32_t *free)
/* Phase 1: reserve @n slots; m is the number actually reserved. */
40 	m = rte_ring_enqueue_bulk_elem_start(r, n, free);
/* Phase 2: copy the elements in and publish them to consumers. */
42 	rte_ring_enqueue_elem_finish(r, obj,
43 		sizeof(struct mlx5_vdpa_task), n);
/*
 * Post @num tasks for @priv onto configuration thread @thrd_idx's ring
 * and wake that thread.
 *
 * Tasks are built on the stack (at most MLX5_VDPA_TASKS_PER_DEV per
 * call), enqueued in bulk, and the per-task remaining_cnt counter —
 * when set — is atomically incremented so callers can poll for
 * completion. The condition variable is signalled under cthrd_lock to
 * wake the worker.
 *
 * NOTE(review): the task-field initialization inside the first loop and
 * the error-return path after a failed enqueue are elided in this view.
 */
48 mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
/* Destination ring belongs to the chosen configuration thread. */
52 	struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
53 	struct mlx5_vdpa_task task[MLX5_VDPA_TASKS_PER_DEV];
56 	MLX5_ASSERT(num <= MLX5_VDPA_TASKS_PER_DEV);
/* Fill in each stack-allocated task before enqueueing the batch. */
57 	for (i = 0 ; i < num; i++) {
59 		/* To be added later. */
/* All-or-nothing bulk enqueue; a zero return means no task was queued. */
61 	if (!mlx5_vdpa_c_thrd_ring_enqueue_bulk(rng, (void **)&task, num, NULL))
/* Account each queued task on its caller-visible completion counter. */
63 	for (i = 0 ; i < num; i++)
64 		if (task[i].remaining_cnt)
65 			__atomic_fetch_add(task[i].remaining_cnt, 1,
67 	/* wake up conf thread. */
68 	pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
69 	pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
70 	pthread_mutex_unlock(&conf_thread_mng.cthrd_lock);
/*
 * Configuration-thread main loop.
 *
 * @arg is the shared struct mlx5_vdpa_conf_thread_mng. The thread first
 * locates its own slot by matching pthread_self() against the recorded
 * tids, then repeatedly dequeues one task at a time from its private
 * ring. When the ring is empty it blocks on its condition variable;
 * after handling a task it atomically decrements the task's
 * remaining_cnt so the poster can observe completion.
 *
 * NOTE(review): the outer loop construct, the empty-ring branch
 * condition, and the actual task dispatch are elided in this view.
 */
75 mlx5_vdpa_c_thread_handle(void *arg)
77 	struct mlx5_vdpa_conf_thread_mng *multhrd = arg;
78 	pthread_t thread_id = pthread_self();
79 	struct mlx5_vdpa_priv *priv;
80 	struct mlx5_vdpa_task task;
/* Find this thread's own slot index by tid. */
85 	for (thrd_idx = 0; thrd_idx < multhrd->max_thrds;
87 		if (multhrd->cthrd[thrd_idx].tid == thread_id)
/* No matching slot: this thread was not registered — bail out. */
89 	if (thrd_idx >= multhrd->max_thrds)
91 	rng = multhrd->cthrd[thrd_idx].rng;
/* Pull a single task per iteration from this thread's ring. */
93 		task_num = mlx5_vdpa_c_thrd_ring_dequeue_bulk(rng,
94 			(void **)&task, 1, NULL);
96 			/* No task and condition wait. */
97 			pthread_mutex_lock(&multhrd->cthrd_lock);
/* pthread_cond_wait releases cthrd_lock while sleeping; a poster
 * signals c_cond after enqueueing new work (see mlx5_vdpa_task_add). */
99 				&multhrd->cthrd[thrd_idx].c_cond,
100 				&multhrd->cthrd_lock);
101 			pthread_mutex_unlock(&multhrd->cthrd_lock);
/* Mark the task done for the poster polling remaining_cnt. */
106 			__atomic_fetch_sub(task.remaining_cnt,
107 				1, __ATOMIC_RELAXED);
108 		/* To be added later. */
/*
 * Tear down one configuration thread slot: cancel and join the worker
 * thread (if running) and free its task ring.
 *
 * NOTE(review): the branch guarding the mutex re-initialization at the
 * line below is elided — presumably it depends on @need_unlock (the
 * thread may have been cancelled while holding cthrd_lock); confirm
 * against the full file.
 */
114 mlx5_vdpa_c_thread_destroy(uint32_t thrd_idx, bool need_unlock)
116 	if (conf_thread_mng.cthrd[thrd_idx].tid) {
/* Cancel then join so the tid is fully reaped before reuse. */
117 		pthread_cancel(conf_thread_mng.cthrd[thrd_idx].tid);
118 		pthread_join(conf_thread_mng.cthrd[thrd_idx].tid, NULL);
119 		conf_thread_mng.cthrd[thrd_idx].tid = 0;
/* Re-create the lock in a known-unlocked state. */
121 			pthread_mutex_init(&conf_thread_mng.cthrd_lock, NULL);
123 	if (conf_thread_mng.cthrd[thrd_idx].rng) {
124 		rte_ring_free(conf_thread_mng.cthrd[thrd_idx].rng);
125 		conf_thread_mng.cthrd[thrd_idx].rng = NULL;
/*
 * Create all configuration worker threads.
 *
 * For each of conf_thread_mng.max_thrds slots: create a fixed-size task
 * ring (HTS multi-producer / multi-consumer), spawn the worker running
 * mlx5_vdpa_c_thread_handle with SCHED_RR at max priority, pin it to
 * @cpu_core when valid (otherwise to the main lcore's cpuset), name it,
 * and initialize its condition variable. On failure, previously created
 * threads are destroyed before returning. The whole sequence runs under
 * cthrd_lock.
 *
 * NOTE(review): the error-exit labels/gotos, the cpu_core validity
 * check, and the function return statements are elided in this view.
 */
130 mlx5_vdpa_c_thread_create(int cpu_core)
132 	const struct sched_param sp = {
/* Workers get real-time round-robin scheduling at max priority. */
133 		.sched_priority = sched_get_priority_max(SCHED_RR),
142 	pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
143 	pthread_attr_init(&attr);
144 	ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
146 		DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
149 	ret = pthread_attr_setschedparam(&attr, &sp);
151 		DRV_LOG(ERR, "Failed to set thread priority.");
/* Split the global task budget evenly across the worker threads. */
154 	ring_num = MLX5_VDPA_MAX_TASKS_PER_THRD / conf_thread_mng.max_thrds;
156 		DRV_LOG(ERR, "Invalid ring number for thread.");
157 	for (thrd_idx = 0; thrd_idx < conf_thread_mng.max_thrds;
161 		snprintf(name, sizeof(name), "vDPA-mthread-ring-%d",
/* One private HTS ring per worker holds its pending tasks. */
163 		conf_thread_mng.cthrd[thrd_idx].rng = rte_ring_create_elem(name,
164 			sizeof(struct mlx5_vdpa_task), ring_num,
166 			RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ |
168 		if (!conf_thread_mng.cthrd[thrd_idx].rng) {
170 				"Failed to create vdpa multi-threads %d ring.",
/* All workers share one entry point and the same management context. */
174 		ret = pthread_create(&conf_thread_mng.cthrd[thrd_idx].tid,
175 				&attr, mlx5_vdpa_c_thread_handle,
176 				(void *)&conf_thread_mng);
178 			DRV_LOG(ERR, "Failed to create vdpa multi-threads %d.",
/* Pin to the requested core, or fall back to the main lcore's set. */
184 			CPU_SET(cpu_core, &cpuset);
186 			cpuset = rte_lcore_cpuset(rte_get_main_lcore());
187 		ret = pthread_setaffinity_np(
188 			conf_thread_mng.cthrd[thrd_idx].tid,
189 			sizeof(cpuset), &cpuset);
191 			DRV_LOG(ERR, "Failed to set thread affinity for "
192 			"vdpa multi-threads %d.", thrd_idx);
/* A readable thread name helps debugging with top/gdb. */
195 		snprintf(name, sizeof(name), "vDPA-mthread-%d", thrd_idx);
196 		ret = pthread_setname_np(
197 			conf_thread_mng.cthrd[thrd_idx].tid, name);
199 			DRV_LOG(ERR, "Failed to set vdpa multi-threads name %s.",
202 			DRV_LOG(DEBUG, "Thread name: %s.", name);
/* Per-thread condvar used by task posters to wake this worker. */
203 		pthread_cond_init(&conf_thread_mng.cthrd[thrd_idx].c_cond,
206 	pthread_mutex_unlock(&conf_thread_mng.cthrd_lock);
/* Error path: unwind every slot created so far, then drop the lock. */
209 	for (thrd_idx = 0; thrd_idx < conf_thread_mng.max_thrds;
211 		mlx5_vdpa_c_thread_destroy(thrd_idx, false);
212 	pthread_mutex_unlock(&conf_thread_mng.cthrd_lock);
/*
 * Initialize the multi-thread configuration machinery: set up the
 * shared lock, then create the worker threads; on failure, tear
 * everything back down.
 *
 * NOTE(review): the return statements are elided in this view.
 */
217 mlx5_vdpa_mult_threads_create(int cpu_core)
/* The shared lock must exist before any worker is spawned. */
219 	pthread_mutex_init(&conf_thread_mng.cthrd_lock, NULL);
220 	if (mlx5_vdpa_c_thread_create(cpu_core)) {
221 		DRV_LOG(ERR, "Cannot create vDPA configuration threads.");
/* Full cleanup: threads, rings, lock, and the management struct. */
222 		mlx5_vdpa_mult_threads_destroy(false);
/*
 * Tear down the whole multi-thread configuration machinery: destroy
 * every worker slot, destroy the shared lock, and zero the management
 * struct. A no-op when nothing was initialized (no initializer_priv).
 *
 * @need_unlock is forwarded to the per-slot destroy; see
 * mlx5_vdpa_c_thread_destroy for its effect on the shared lock.
 */
229 mlx5_vdpa_mult_threads_destroy(bool need_unlock)
/* Nothing to do if initialization never completed. */
233 	if (!conf_thread_mng.initializer_priv)
235 	for (thrd_idx = 0; thrd_idx < conf_thread_mng.max_thrds;
237 		mlx5_vdpa_c_thread_destroy(thrd_idx, need_unlock);
238 	pthread_mutex_destroy(&conf_thread_mng.cthrd_lock);
/* Reset all state so a later re-create starts from a clean slate. */
239 	memset(&conf_thread_mng, 0, sizeof(struct mlx5_vdpa_conf_thread_mng));