common/mlx5: fix post doorbell barrier
drivers/vdpa/mlx5/mlx5_vdpa_event.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u

void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
        mlx5_devx_uar_release(&priv->uar);
#ifdef HAVE_IBV_DEVX_EVENT
        if (priv->eventc) {
                mlx5_os_devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
#endif
}

/* Prepare the global resources shared by all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
        if (priv->eventc)
                return 0;
        priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create event channel %d.",
                        rte_errno);
                goto error;
        }
        if (mlx5_devx_uar_prepare(priv->cdev, &priv->uar) != 0) {
                DRV_LOG(ERR, "Failed to allocate UAR.");
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_event_qp_global_release(priv);
        return -1;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
        mlx5_devx_cq_destroy(&cq->cq_obj);
        memset(cq, 0, sizeof(*cq));
}

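/*
 * Arm the CQ so that HW raises one more event on the channel when a new
 * CQE arrives. The 64-bit doorbell value written below packs, in its high
 * dword, the arm sequence number, the arm command and the masked consumer
 * index, and carries the CQ number in its low dword; the high dword is
 * also kept in db_rec[MLX5_CQ_ARM_DB] so HW and SW stay in sync. The exact
 * bit layout is given by the MLX5_CQ_SQN_OFFSET, MLX5_CI_MASK and
 * MLX5_CQ_DBR_CMD_ALL definitions used here.
 */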
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
        uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
        uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
        uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
        uint64_t db_be = rte_cpu_to_be_64(doorbell);

        mlx5_doorbell_ring(&priv->uar.cq_db, db_be, doorbell_hi,
                           &cq->cq_obj.db_rec[MLX5_CQ_ARM_DB], 0);
        cq->arm_sn++;
        cq->armed = 1;
}

static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
{
        struct mlx5_devx_cq_attr attr = {
                .use_first_only = 1,
                .uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
        };
        uint16_t event_nums[1] = {0};
        int ret;

        ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
                                  &attr, SOCKET_ID_ANY);
        if (ret)
                goto error;
        cq->cq_ci = 0;
        cq->log_desc_n = log_desc_n;
        rte_spinlock_init(&cq->sl);
        /* Subscribe CQ event to the event channel controlled by the driver. */
        ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
                                                cq->cq_obj.cq->obj,
                                                sizeof(event_nums), event_nums,
                                                (uint64_t)(uintptr_t)cq);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
                goto error;
        }
        cq->callfd = callfd;
        /* Init CQE ownership to HW so that SW sees an empty CQ at start. */
        cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
        cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
error:
        mlx5_vdpa_cq_destroy(cq);
        return -1;
}

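/*
 * Poll the collapsed CQ: with use_first_only set, HW overwrites CQE[0] on
 * every completion, so the number of new completions is derived from the
 * wqe_counter of that single CQE. The 16-bit arithmetic below handles
 * wraparound naturally; e.g. (hypothetical values) cur_wqe_counter = 1 and
 * cq_ci = 0xffff give comp = 3.
 */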
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
        union {
                struct {
                        uint16_t wqe_counter;
                        uint8_t rsvd5;
                        uint8_t op_own;
                };
                uint32_t word;
        } last_word;
        uint16_t next_wqe_counter = cq->cq_ci;
        uint16_t cur_wqe_counter;
        uint16_t comp;

        last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
        cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
        comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
        if (comp) {
                cq->cq_ci += comp;
                MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
                            MLX5_CQE_INVALID);
                /* Count error completions (requester/responder errors). */
                if (unlikely(MLX5_CQE_OPCODE(last_word.op_own) ==
                             MLX5_CQE_RESP_ERR ||
                             MLX5_CQE_OPCODE(last_word.op_own) ==
                             MLX5_CQE_REQ_ERR))
                        cq->errors++;
                rte_io_wmb();
                /* Ring CQ doorbell record. */
                cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
                rte_io_wmb();
                /* Ring SW QP doorbell record. */
                eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
        }
        return comp;
}

static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_vdpa_cq *cq;
        int i;

        for (i = 0; i < priv->nr_virtqs; i++) {
                cq = &priv->virtqs[i].eqp.cq;
                if (cq->cq_obj.cq && !cq->armed)
                        mlx5_vdpa_cq_arm(priv, cq);
        }
}

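/*
 * Adapt the polling interval in the dynamic timer mode: grow the delay by
 * event_us steps while there is no traffic, and shrink it proportionally to
 * the largest completion burst seen in the last round (max); e.g. a burst
 * of 4 completions divides the current delay by 4.
 */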
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
        if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
                switch (max) {
                case 0:
                        priv->timer_delay_us += priv->event_us;
                        break;
                case 1:
                        break;
                default:
                        priv->timer_delay_us /= max;
                        break;
                }
        }
        if (priv->timer_delay_us)
                usleep(priv->timer_delay_us);
        else
                /* Yield the CPU to improve scheduling of polling threads. */
                sched_yield();
}

/* Notify the virtio device about new traffic on a specific virtq. */
static uint32_t
mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
{
        uint32_t comp = 0;

        if (cq->cq_obj.cq) {
                comp = mlx5_vdpa_cq_poll(cq);
                if (comp) {
                        if (cq->callfd != -1)
                                eventfd_write(cq->callfd, (eventfd_t)1);
                        cq->armed = 0;
                }
        }
        return comp;
}

/* Notify the virtio device about new traffic on any virtq. */
static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
{
        int i;
        uint32_t max = 0;

        for (i = 0; i < priv->nr_virtqs; i++) {
                struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
                uint32_t comp = mlx5_vdpa_queue_complete(cq);

                if (comp > max)
                        max = comp;
        }
        return max;
}

/* Wait on the event channel for a completion event from any CQ. */
static struct mlx5_vdpa_cq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                            sizeof(out.buf));

        if (ret >= 0)
                return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
        DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
                ret, errno);
#endif
        return NULL;
}

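/*
 * Event handler thread body. In the timer modes the thread busy-polls all
 * virtq CQs and, after no_traffic_max idle rounds, arms the CQs and blocks
 * on the event channel until traffic resumes. In interrupt-only mode it
 * blocks on the channel and re-arms just the CQ that fired.
 */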
static void *
mlx5_vdpa_event_handle(void *arg)
{
        struct mlx5_vdpa_priv *priv = arg;
        struct mlx5_vdpa_cq *cq;
        uint32_t max;

        switch (priv->event_mode) {
        case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
        case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
                priv->timer_delay_us = priv->event_us;
                while (1) {
                        pthread_mutex_lock(&priv->vq_config_lock);
                        max = mlx5_vdpa_queues_complete(priv);
                        if (max == 0 && priv->no_traffic_counter++ >=
                            priv->no_traffic_max) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
                                do {
                                        pthread_mutex_unlock
                                                        (&priv->vq_config_lock);
                                        cq = mlx5_vdpa_event_wait(priv);
                                        pthread_mutex_lock
                                                        (&priv->vq_config_lock);
                                        if (cq == NULL ||
                                               mlx5_vdpa_queue_complete(cq) > 0)
                                                break;
                                } while (1);
                                priv->timer_delay_us = priv->event_us;
                                priv->no_traffic_counter = 0;
                        } else if (max != 0) {
                                priv->no_traffic_counter = 0;
                        }
                        pthread_mutex_unlock(&priv->vq_config_lock);
                        mlx5_vdpa_timer_sleep(priv, max);
                }
                return NULL;
        case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
                do {
                        cq = mlx5_vdpa_event_wait(priv);
                        if (cq != NULL) {
                                pthread_mutex_lock(&priv->vq_config_lock);
                                if (mlx5_vdpa_queue_complete(cq) > 0)
                                        mlx5_vdpa_cq_arm(priv, cq);
                                pthread_mutex_unlock(&priv->vq_config_lock);
                        }
                } while (1);
                return NULL;
        default:
                return NULL;
        }
}

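/*
 * Device error interrupt handler. Each event cookie packs the virtq index
 * in its low 32 bits and the virtq version in its high 32 bits, so stale
 * events for a re-created virtq can be discarded. The err_time array logs
 * the timestamps of the last errors and drives the retry policy: the virtq
 * is re-enabled only while errors stay rare.
 */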
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        struct mlx5_vdpa_priv *priv = cb_arg;
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        uint32_t vq_index, i, version;
        struct mlx5_vdpa_virtq *virtq;
        uint64_t sec;

        pthread_mutex_lock(&priv->vq_config_lock);
        while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                vq_index = out.event_resp.cookie & UINT32_MAX;
                version = out.event_resp.cookie >> 32;
                if (vq_index >= priv->nr_virtqs) {
                        DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
                                priv->vdev->device->name, vq_index);
                        continue;
                }
                virtq = &priv->virtqs[vq_index];
                if (!virtq->enable || virtq->version != version)
                        continue;
                if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
                        continue;
                virtq->stopped = true;
                /* Query error info. */
                if (mlx5_vdpa_virtq_query(priv, vq_index))
                        goto log;
                /* Disable vq. */
                if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
                        DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
                        goto log;
                }
                /* Retry only while errors are rare: the oldest logged error
                 * must be older than MLX5_VDPA_ERROR_TIME_SEC.
                 */
                sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
                if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
                        /* Retry. */
                        if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
                                DRV_LOG(ERR, "Failed to enable virtq %d.",
                                        vq_index);
                        else
                                DRV_LOG(WARNING, "Recover virtq %d: %u.",
                                        vq_index, ++virtq->n_retry);
                } else {
                        /* Too many errors within the time window, give up. */
                        DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
                                priv->vdev->device->name, vq_index);
                }
log:
                /* Shift the error-time log and append the current time. */
                for (i = 1; i < RTE_DIM(virtq->err_time); i++)
                        virtq->err_time[i - 1] = virtq->err_time[i];
                virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
        }
        pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

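/*
 * Set up the device error event channel: create a DevX event channel, make
 * its FD non-blocking and register it as an external interrupt with the
 * EAL, so mlx5_vdpa_err_interrupt_handler() runs on every error event.
 */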
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        int flags;

        /* Setup device event channel. */
        priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
                                                              0);
        if (!priv->err_chnl) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create device event channel %d.",
                        rte_errno);
                goto error;
        }
        flags = fcntl(priv->err_chnl->fd, F_GETFL);
        ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                DRV_LOG(ERR, "Failed to change device event channel FD.");
                goto error;
        }

        if (rte_intr_fd_set(priv->err_intr_handle, priv->err_chnl->fd))
                goto error;

        if (rte_intr_type_set(priv->err_intr_handle, RTE_INTR_HANDLE_EXT))
                goto error;

        if (rte_intr_callback_register(priv->err_intr_handle,
                                       mlx5_vdpa_err_interrupt_handler,
                                       priv)) {
                rte_intr_fd_set(priv->err_intr_handle, 0);
                DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
                        priv->vid);
                goto error;
        } else {
                DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
                        priv->vid);
        }
        return 0;
error:
        mlx5_vdpa_err_event_unset(priv);
        return -1;
}

void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;

        if (!rte_intr_fd_get(priv->err_intr_handle))
                return;
        while (retries-- && ret == -EAGAIN) {
                ret = rte_intr_callback_unregister(priv->err_intr_handle,
                                            mlx5_vdpa_err_interrupt_handler,
                                            priv);
                if (ret == -EAGAIN) {
                        DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                "of error interrupt, retries = %d.",
                                rte_intr_fd_get(priv->err_intr_handle),
                                retries);
                        rte_pause();
                }
        }
        if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
                                    128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->err_chnl,
                       &out.event_resp, sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
#endif
                mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
                priv->err_chnl = NULL;
        }
}

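/*
 * Create the CQE event thread with SCHED_RR priority and pin it either to
 * the configured event core or to the main lcore CPU set. Note that the
 * attr-based policy/priority typically takes effect only with
 * PTHREAD_EXPLICIT_SCHED set via pthread_attr_setinheritsched(); by default
 * a new thread inherits the creator's scheduling.
 */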
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        rte_cpuset_t cpuset;
        pthread_attr_t attr;
        char name[16];
        const struct sched_param sp = {
                .sched_priority = sched_get_priority_max(SCHED_RR),
        };

        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
        pthread_attr_init(&attr);
        ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
                return -1;
        }
        ret = pthread_attr_setschedparam(&attr, &sp);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread priority.");
                return -1;
        }
        ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
                             (void *)priv);
        if (ret) {
                DRV_LOG(ERR, "Failed to create timer thread.");
                return -1;
        }
        CPU_ZERO(&cpuset);
        if (priv->event_core != -1)
                CPU_SET(priv->event_core, &cpuset);
        else
                cpuset = rte_lcore_cpuset(rte_get_main_lcore());
        ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread affinity.");
                return -1;
        }
        snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
        ret = rte_thread_setname(priv->timer_tid, name);
        if (ret)
                DRV_LOG(DEBUG, "Cannot set timer thread name.");
        return 0;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
        void *status;

        if (priv->timer_tid) {
                pthread_cancel(priv->timer_tid);
                pthread_join(priv->timer_tid, &status);
        }
        priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
        mlx5_devx_qp_destroy(&eqp->sw_qp);
        if (eqp->fw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
        mlx5_vdpa_cq_destroy(&eqp->cq);
        memset(eqp, 0, sizeof(*eqp));
}

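/*
 * Move the loopback QP pair through the standard RST -> INIT -> RTR -> RTS
 * state ladder. The FW QP and the SW QP are connected back to back, so each
 * modify command passes the peer QP number as the remote side.
 */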
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        return 0;
}

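/*
 * Create one event QP pair per virtq: a FW-owned QP and an RQ-only SW QP
 * that share the CQ created above. After the pair reaches RTS, the first
 * doorbell below posts all RQ descriptors, passing their ownership to HW.
 */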
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                          int callfd, struct mlx5_vdpa_event_qp *eqp)
{
        struct mlx5_devx_qp_attr attr = {0};
        uint16_t log_desc_n = rte_log2_u32(desc_n);
        uint32_t ret;

        if (mlx5_vdpa_event_qp_global_prepare(priv))
                return -1;
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->cdev->pdn;
        attr.ts_format =
                mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
                goto error;
        }
        attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);
        attr.cqn = eqp->cq.cq_obj.cq->id;
        attr.rq_size = RTE_BIT32(log_desc_n);
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.sq_size = 0; /* No SQ is needed. */
        attr.ts_format =
                mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp), log_desc_n,
                                  &attr, SOCKET_ID_ANY);
        if (ret) {
                DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
                goto error;
        }
        if (mlx5_vdpa_qps2rts(eqp))
                goto error;
        /* First doorbell ring: post all RQ descriptors to HW. */
        rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
                        &eqp->sw_qp.db_rec[0]);
        return 0;
error:
        mlx5_vdpa_event_qp_destroy(eqp);
        return -1;
}