drivers/vdpa/mlx5/mlx5_vdpa_event.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u

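/*
 * Release the per-device global event resources: the UAR used for CQ
 * doorbells and, when DevX events are available, the DevX event channel
 * shared by all completion queues.
 */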
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
        if (priv->uar) {
                mlx5_glue->devx_free_uar(priv->uar);
                priv->uar = NULL;
        }
#ifdef HAVE_IBV_DEVX_EVENT
        if (priv->eventc) {
                mlx5_os_devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
#endif
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
        if (priv->eventc)
                return 0;
        priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create event channel %d.",
                        rte_errno);
                goto error;
        }
        /*
         * This PMD always issues a write memory barrier before UAR
         * register writes, so it is safe to allocate the UAR with any
         * memory mapping type.
         */
        priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
        if (!priv->uar) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to allocate UAR.");
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_event_qp_global_release(priv);
        return -1;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
        mlx5_devx_cq_destroy(&cq->cq_obj);
        memset(cq, 0, sizeof(*cq));
}

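/*
 * Arm the CQ for the next completion event.
 * The 64-bit doorbell written to the UAR is composed as
 *   doorbell = ((arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci) << 32) | cq_number
 * and the same high word is also stored in the CQ arm doorbell record.
 * On 32-bit architectures the two halves are written separately.
 */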
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
        uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
        uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
        uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
        uint64_t db_be = rte_cpu_to_be_64(doorbell);
        uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

        rte_io_wmb();
        cq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
        rte_wmb();
#ifdef RTE_ARCH_64
        *(uint64_t *)addr = db_be;
#else
        *(uint32_t *)addr = db_be;
        rte_io_wmb();
        *((uint32_t *)addr + 1) = db_be >> 32;
#endif
        cq->arm_sn++;
        cq->armed = 1;
}

static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
{
        struct mlx5_devx_cq_attr attr = {
                .use_first_only = 1,
                .uar_page_id = priv->uar->page_id,
        };
        uint16_t event_nums[1] = {0};
        int ret;

        ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
                                  &attr, SOCKET_ID_ANY);
        if (ret)
                goto error;
        cq->cq_ci = 0;
        cq->log_desc_n = log_desc_n;
        rte_spinlock_init(&cq->sl);
        /* Subscribe the CQ to the event channel controlled by the driver. */
        ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
                                                cq->cq_obj.cq->obj,
                                                sizeof(event_nums), event_nums,
                                                (uint64_t)(uintptr_t)cq);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
                goto error;
        }
        cq->callfd = callfd;
        /* Initialize the CQE to ones so that it is owned by HW at the start. */
        cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
        cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
error:
        mlx5_vdpa_cq_destroy(cq);
        return -1;
}

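/*
 * Poll a collapsed CQ (created with use_first_only set): HW overwrites the
 * single CQE in place, so the number of new completions is derived from the
 * difference between the CQE WQE counter and the driver consumer index,
 * e.g. with cq_ci == 5 and wqe_counter == 7, comp == 7 + 1 - 5 == 3.
 * Both the CQ doorbell record and the SW QP doorbell record are then updated
 * to keep receive WQEs posted for further completions.
 */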
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
        union {
                struct {
                        uint16_t wqe_counter;
                        uint8_t rsvd5;
                        uint8_t op_own;
                };
                uint32_t word;
        } last_word;
        uint16_t next_wqe_counter = cq->cq_ci;
        uint16_t cur_wqe_counter;
        uint16_t comp;

        last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
        cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
        comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
        if (comp) {
                cq->cq_ci += comp;
                MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
                            MLX5_CQE_INVALID);
                if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_RESP_ERR ||
                               MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_REQ_ERR)))
                        cq->errors++;
                rte_io_wmb();
                /* Ring CQ doorbell record. */
                cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
                rte_io_wmb();
                /* Ring SW QP doorbell record. */
                eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
        }
        return comp;
}

static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_vdpa_cq *cq;
        int i;

        for (i = 0; i < priv->nr_virtqs; i++) {
                cq = &priv->virtqs[i].eqp.cq;
                if (cq->cq_obj.cq && !cq->armed)
                        mlx5_vdpa_cq_arm(priv, cq);
        }
}

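/*
 * Adapt the polling-thread delay in the dynamic timer mode:
 * when no completion was found the delay grows by event_us, and when
 * several completions were found it shrinks by dividing by their number.
 * A zero delay yields the CPU instead of sleeping.
 */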
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
        if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
                switch (max) {
                case 0:
                        priv->timer_delay_us += priv->event_us;
                        break;
                case 1:
                        break;
                default:
                        priv->timer_delay_us /= max;
                        break;
                }
        }
        if (priv->timer_delay_us)
                usleep(priv->timer_delay_us);
        else
                /* Give up the CPU to improve the polling threads' scheduling. */
                sched_yield();
}

/* Notify the virtio device about new traffic on a specific virtq. */
static uint32_t
mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
{
        uint32_t comp = 0;

        if (cq->cq_obj.cq) {
                comp = mlx5_vdpa_cq_poll(cq);
                if (comp) {
                        if (cq->callfd != -1)
                                eventfd_write(cq->callfd, (eventfd_t)1);
                        cq->armed = 0;
                }
        }
        return comp;
}

/* Notify the virtio device about new traffic on any virtq. */
static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
{
        int i;
        uint32_t max = 0;

        for (i = 0; i < priv->nr_virtqs; i++) {
                struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
                uint32_t comp = mlx5_vdpa_queue_complete(cq);

                if (comp > max)
                        max = comp;
        }
        return max;
}

/* Wait on the CQ event channel for a completion event on any CQ. */
static struct mlx5_vdpa_cq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                            sizeof(out.buf));

        if (ret >= 0)
                return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
        DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
                ret, errno);
#endif
        return NULL;
}

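/*
 * Main loop of the completion-handling thread.
 * In the timer modes all the CQs are polled periodically; after
 * no_traffic_max empty polling rounds the CQs are armed and the thread
 * blocks on the event channel until a completion event arrives.
 * In the interrupt-only mode the thread just waits for events and
 * re-arms the reported CQ after completing it.
 */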
static void *
mlx5_vdpa_event_handle(void *arg)
{
        struct mlx5_vdpa_priv *priv = arg;
        struct mlx5_vdpa_cq *cq;
        uint32_t max;

        switch (priv->event_mode) {
        case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
        case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
                priv->timer_delay_us = priv->event_us;
                while (1) {
                        pthread_mutex_lock(&priv->vq_config_lock);
                        max = mlx5_vdpa_queues_complete(priv);
                        if (max == 0 && priv->no_traffic_counter++ >=
                            priv->no_traffic_max) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
                                do {
                                        pthread_mutex_unlock
                                                        (&priv->vq_config_lock);
                                        cq = mlx5_vdpa_event_wait(priv);
                                        pthread_mutex_lock
                                                        (&priv->vq_config_lock);
                                        if (cq == NULL ||
                                               mlx5_vdpa_queue_complete(cq) > 0)
                                                break;
                                } while (1);
                                priv->timer_delay_us = priv->event_us;
                                priv->no_traffic_counter = 0;
                        } else if (max != 0) {
                                priv->no_traffic_counter = 0;
                        }
                        pthread_mutex_unlock(&priv->vq_config_lock);
                        mlx5_vdpa_timer_sleep(priv, max);
                }
                return NULL;
        case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
                do {
                        cq = mlx5_vdpa_event_wait(priv);
                        if (cq != NULL) {
                                pthread_mutex_lock(&priv->vq_config_lock);
                                if (mlx5_vdpa_queue_complete(cq) > 0)
                                        mlx5_vdpa_cq_arm(priv, cq);
                                pthread_mutex_unlock(&priv->vq_config_lock);
                        }
                } while (1);
                return NULL;
        default:
                return NULL;
        }
}

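/*
 * Device error interrupt handler.
 * Each error event carries a 64-bit cookie: the low 32 bits hold the
 * virtq index and the high 32 bits hold the virtq version, so stale
 * events from a re-created virtq can be dropped. A failing virtq is
 * queried, disabled and re-enabled, unless its error-time log shows
 * repeated errors within MLX5_VDPA_ERROR_TIME_SEC seconds.
 */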
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        struct mlx5_vdpa_priv *priv = cb_arg;
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        uint32_t vq_index, i, version;
        struct mlx5_vdpa_virtq *virtq;
        uint64_t sec;

        pthread_mutex_lock(&priv->vq_config_lock);
        while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                vq_index = out.event_resp.cookie & UINT32_MAX;
                version = out.event_resp.cookie >> 32;
                if (vq_index >= priv->nr_virtqs) {
                        DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
                                priv->vdev->device->name, vq_index);
                        continue;
                }
                virtq = &priv->virtqs[vq_index];
                if (!virtq->enable || virtq->version != version)
                        continue;
                if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
                        continue;
                virtq->stopped = true;
                /* Query error info. */
                if (mlx5_vdpa_virtq_query(priv, vq_index))
                        goto log;
                /* Disable vq. */
                if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
                        DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
                        goto log;
                }
                /*
                 * Retry if fewer than RTE_DIM(err_time) errors happened
                 * within the last MLX5_VDPA_ERROR_TIME_SEC seconds.
                 */
                sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
                if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
                        /* Retry. */
                        if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
                                DRV_LOG(ERR, "Failed to enable virtq %d.",
                                        vq_index);
                        else
                                DRV_LOG(WARNING, "Recover virtq %d: %u.",
                                        vq_index, ++virtq->n_retry);
                } else {
                        /* Too many errors within the time window, give up. */
                        DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
                                priv->vdev->device->name, vq_index);
                }
log:
                /* Shift the log and store the current time at its end. */
                for (i = 1; i < RTE_DIM(virtq->err_time); i++)
                        virtq->err_time[i - 1] = virtq->err_time[i];
                virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
        }
        pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

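/*
 * Set up device error event reporting: create a dedicated DevX event
 * channel, switch its file descriptor to non-blocking mode and register
 * it with the EAL interrupt thread so that mlx5_vdpa_err_interrupt_handler
 * is called on incoming error events.
 */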
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        int flags;

        /* Set up the device event channel. */
        priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
                                                              0);
        if (!priv->err_chnl) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create device event channel %d.",
                        rte_errno);
                goto error;
        }
        flags = fcntl(priv->err_chnl->fd, F_GETFL);
        ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                DRV_LOG(ERR, "Failed to change device event channel FD.");
                goto error;
        }
        priv->err_intr_handle.fd = priv->err_chnl->fd;
        priv->err_intr_handle.type = RTE_INTR_HANDLE_EXT;
        if (rte_intr_callback_register(&priv->err_intr_handle,
                                       mlx5_vdpa_err_interrupt_handler,
                                       priv)) {
                priv->err_intr_handle.fd = 0;
                DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
                        priv->vid);
                goto error;
        } else {
                DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
                        priv->vid);
        }
        return 0;
error:
        mlx5_vdpa_err_event_unset(priv);
        return -1;
}

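/*
 * Tear down device error event reporting: unregister the interrupt
 * callback (retrying while the handler may still be running), drain any
 * pending events and destroy the DevX event channel.
 */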
void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;

        if (!priv->err_intr_handle.fd)
                return;
        while (retries-- && ret == -EAGAIN) {
                ret = rte_intr_callback_unregister(&priv->err_intr_handle,
                                            mlx5_vdpa_err_interrupt_handler,
                                            priv);
                if (ret == -EAGAIN) {
                        DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                "of error interrupt, retries = %d.",
                                priv->err_intr_handle.fd, retries);
                        rte_pause();
                }
        }
        memset(&priv->err_intr_handle, 0, sizeof(priv->err_intr_handle));
        if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
                                    128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->err_chnl,
                       &out.event_resp, sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
#endif
                mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
                priv->err_chnl = NULL;
        }
}

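/*
 * Create the completion-event handling thread when at least one virtq
 * uses event mode. The thread runs mlx5_vdpa_event_handle() with SCHED_RR
 * at the highest real-time priority and is pinned either to event_core or
 * to the CPU set of the main lcore.
 */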
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        rte_cpuset_t cpuset;
        pthread_attr_t attr;
        char name[16];
        const struct sched_param sp = {
                .sched_priority = sched_get_priority_max(SCHED_RR),
        };

        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
        pthread_attr_init(&attr);
        ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
                return -1;
        }
        ret = pthread_attr_setschedparam(&attr, &sp);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread priority.");
                return -1;
        }
        ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
                             (void *)priv);
        if (ret) {
                DRV_LOG(ERR, "Failed to create timer thread.");
                return -1;
        }
        CPU_ZERO(&cpuset);
        if (priv->event_core != -1)
                CPU_SET(priv->event_core, &cpuset);
        else
                cpuset = rte_lcore_cpuset(rte_get_main_lcore());
        ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread affinity.");
                return -1;
        }
        snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
        ret = rte_thread_setname(priv->timer_tid, name);
        if (ret)
                DRV_LOG(DEBUG, "Cannot set timer thread name.");
        return 0;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
        void *status;

        if (priv->timer_tid) {
                pthread_cancel(priv->timer_tid);
                pthread_join(priv->timer_tid, &status);
        }
        priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
        mlx5_devx_qp_destroy(&eqp->sw_qp);
        if (eqp->fw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
        mlx5_vdpa_cq_destroy(&eqp->cq);
        memset(eqp, 0, sizeof(*eqp));
}

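/*
 * Move the FW QP and the SW QP of an event QP pair through the standard
 * RST -> INIT -> RTR -> RTS state transitions, each QP using the other's
 * QP number as its remote, so the two QPs are connected back to back.
 */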
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        return 0;
}

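/*
 * Create an event QP: a CQ plus a FW QP and a SW QP connected back to back.
 * The SW QP has only a receive queue (no SQ) and uses the CQ created here;
 * its doorbell record is initially rung with the full RQ size to post all
 * receive WQEs.
 */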
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                          int callfd, struct mlx5_vdpa_event_qp *eqp)
{
        struct mlx5_devx_qp_attr attr = {0};
        uint16_t log_desc_n = rte_log2_u32(desc_n);
        uint32_t ret;

        if (mlx5_vdpa_event_qp_global_prepare(priv))
                return -1;
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->pdn;
        attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
                goto error;
        }
        attr.uar_index = priv->uar->page_id;
        attr.cqn = eqp->cq.cq_obj.cq->id;
        attr.rq_size = RTE_BIT32(log_desc_n);
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.sq_size = 0; /* No SQ is needed. */
        attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
        ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp), log_desc_n,
                                  &attr, SOCKET_ID_ANY);
        if (ret) {
                DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
                goto error;
        }
        if (mlx5_vdpa_qps2rts(eqp))
                goto error;
        /* First doorbell ringing. */
        rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
                        &eqp->sw_qp.db_rec[0]);
        return 0;
error:
        mlx5_vdpa_event_qp_destroy(eqp);
        return -1;
}