/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u

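/*
 * Release the event resources shared by all the event queues of the device:
 * the DevX event channel and the UAR.
 */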
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->uar) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
#ifdef HAVE_IBV_DEVX_EVENT
	if (priv->eventc) {
		mlx5_os_devx_destroy_event_channel(priv->eventc);
		priv->eventc = NULL;
	}
#endif
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	if (priv->eventc)
		return 0;
	priv->eventc = mlx5_os_devx_create_event_channel(priv->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		goto error;
	}
	/*
	 * This PMD always issues a write memory barrier before UAR register
	 * writes, so it is safe to allocate the UAR with any memory mapping
	 * type.
	 */
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (!priv->uar) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_event_qp_global_release(priv);
	return -1;
}

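/* Destroy the CQ DevX object and reset the per-CQ context. */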
static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
}

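/*
 * Arm the CQ for the next completion event: update the CQ arm doorbell
 * record and then write the CQ doorbell register in the UAR page. On 32-bit
 * architectures the 64-bit doorbell value is written as two 32-bit words.
 */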
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);
	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

	rte_io_wmb();
	cq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	cq->arm_sn++;
	cq->armed = 1;
}

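/*
 * Create a collapsed CQ (use_first_only) for an event QP, subscribe its
 * completion events to the global event channel and arm it for the first
 * completion.
 */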
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.use_first_only = 1,
		.uar_page_id = priv->uar->page_id,
	};
	uint16_t event_nums[1] = {0};
	int ret;

	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, log_desc_n, &attr,
				  SOCKET_ID_ANY);
	if (ret)
		goto error;
	cq->cq_ci = 0;
	cq->log_desc_n = log_desc_n;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
						cq->cq_obj.cq->obj,
						sizeof(event_nums), event_nums,
						(uint64_t)(uintptr_t)cq);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		goto error;
	}
	cq->callfd = callfd;
	/* Initialize the first CQE to all ones so that HW owns it at start. */
	cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
	cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}

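/*
 * Poll the collapsed CQ: the device reports completions only through the
 * first CQE, so the number of new completions is derived from the CQE WQE
 * counter relative to the driver consumer index. The CQ and SW QP doorbell
 * records are updated accordingly.
 */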
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	union {
		struct {
			uint16_t wqe_counter;
			uint8_t rsvd5;
			uint8_t op_own;
		};
		uint32_t word;
	} last_word;
	uint16_t next_wqe_counter = cq->cq_ci;
	uint16_t cur_wqe_counter;
	uint16_t comp;

	last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
	if (comp) {
		cq->cq_ci += comp;
		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
			    MLX5_CQE_INVALID);
		if (unlikely(MLX5_CQE_OPCODE(last_word.op_own) ==
			     MLX5_CQE_RESP_ERR ||
			     MLX5_CQE_OPCODE(last_word.op_own) ==
			     MLX5_CQE_REQ_ERR))
			cq->errors++;
		rte_io_wmb();
		/* Ring CQ doorbell record. */
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
		rte_io_wmb();
		/* Ring SW QP doorbell record. */
		eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
	}
	return comp;
}

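/* Arm all the created and currently unarmed CQs of the device. */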
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_cq *cq;
	int i;

	for (i = 0; i < priv->nr_virtqs; i++) {
		cq = &priv->virtqs[i].eqp.cq;
		if (cq->cq_obj.cq && !cq->armed)
			mlx5_vdpa_cq_arm(priv, cq);
	}
}

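/*
 * Sleep between polling iterations. In dynamic timer mode the delay grows
 * when there are no completions and shrinks in proportion to the largest
 * completion burst; a zero delay yields the CPU instead of sleeping.
 */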
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
		switch (max) {
		case 0:
			priv->timer_delay_us += priv->event_us;
			break;
		case 1:
			break;
		default:
			priv->timer_delay_us /= max;
			break;
		}
	}
	if (priv->timer_delay_us)
		usleep(priv->timer_delay_us);
	else
		/* Give up the CPU to improve polling threads scheduling. */
		sched_yield();
}

/* Notify the virtio driver about new traffic on a specific virtq. */
static uint32_t
mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
{
	uint32_t comp = 0;

	if (cq->cq_obj.cq) {
		comp = mlx5_vdpa_cq_poll(cq);
		if (comp) {
			if (cq->callfd != -1)
				eventfd_write(cq->callfd, (eventfd_t)1);
			cq->armed = 0;
		}
	}
	return comp;
}

/* Notify the virtio driver about new traffic on any virtq. */
static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
{
	int i;
	uint32_t max = 0;

	for (i = 0; i < priv->nr_virtqs; i++) {
		struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
		uint32_t comp = mlx5_vdpa_queue_complete(cq);

		if (comp > max)
			max = comp;
	}
	return max;
}

/* Wait on the event channel for a completion event from any CQ. */
static struct mlx5_vdpa_cq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					    sizeof(out.buf));

	if (ret >= 0)
		return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
	DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
		ret, errno);
#endif
	return NULL;
}

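/*
 * Event handling thread. In the timer modes it polls all the virtq CQs and,
 * after a configured period without traffic, arms the CQs and blocks on the
 * event channel until traffic resumes. In interrupt-only mode it blocks on
 * the event channel and re-arms a CQ after each handled completion.
 */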
static void *
mlx5_vdpa_event_handle(void *arg)
{
	struct mlx5_vdpa_priv *priv = arg;
	struct mlx5_vdpa_cq *cq;
	uint32_t max;

	switch (priv->event_mode) {
	case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
	case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
		priv->timer_delay_us = priv->event_us;
		while (1) {
			pthread_mutex_lock(&priv->vq_config_lock);
			max = mlx5_vdpa_queues_complete(priv);
			if (max == 0 && priv->no_traffic_counter++ >=
			    priv->no_traffic_max) {
				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
					priv->vdev->device->name);
				mlx5_vdpa_arm_all_cqs(priv);
				do {
					pthread_mutex_unlock
							(&priv->vq_config_lock);
					cq = mlx5_vdpa_event_wait(priv);
					pthread_mutex_lock
							(&priv->vq_config_lock);
					if (cq == NULL ||
					       mlx5_vdpa_queue_complete(cq) > 0)
						break;
				} while (1);
				priv->timer_delay_us = priv->event_us;
				priv->no_traffic_counter = 0;
			} else if (max != 0) {
				priv->no_traffic_counter = 0;
			}
			pthread_mutex_unlock(&priv->vq_config_lock);
			mlx5_vdpa_timer_sleep(priv, max);
		}
		return NULL;
	case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
		do {
			cq = mlx5_vdpa_event_wait(priv);
			if (cq != NULL) {
				pthread_mutex_lock(&priv->vq_config_lock);
				if (mlx5_vdpa_queue_complete(cq) > 0)
					mlx5_vdpa_cq_arm(priv, cq);
				pthread_mutex_unlock(&priv->vq_config_lock);
			}
		} while (1);
		return NULL;
	default:
		return NULL;
	}
}

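/*
 * Error interrupt handler: drain the device error event channel and, for
 * each reported virtq error, query the error info and try to recover the
 * virtq by disabling and re-enabling it, unless errors repeat too quickly.
 */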
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	struct mlx5_vdpa_priv *priv = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	uint32_t vq_index, i, version;
	struct mlx5_vdpa_virtq *virtq;
	uint64_t sec;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		vq_index = out.event_resp.cookie & UINT32_MAX;
		version = out.event_resp.cookie >> 32;
		if (vq_index >= priv->nr_virtqs) {
			DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
				priv->vdev->device->name, vq_index);
			continue;
		}
		virtq = &priv->virtqs[vq_index];
		if (!virtq->enable || virtq->version != version)
			continue;
		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
			continue;
		virtq->stopped = true;
		/* Query error info. */
		if (mlx5_vdpa_virtq_query(priv, vq_index))
			goto log;
		/* Disable vq. */
		if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
			DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
			goto log;
		}
		/* Retry if error happens less than N times in 3 seconds. */
		sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
		if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
			/* Retry. */
			if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
				DRV_LOG(ERR, "Failed to enable virtq %d.",
					vq_index);
			else
				DRV_LOG(WARNING, "Recover virtq %d: %u.",
					vq_index, ++virtq->n_retry);
		} else {
			/* Retry timeout, give up. */
			DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
				priv->vdev->device->name, vq_index);
		}
log:
		/* Shift in current time to error time log end. */
		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
			virtq->err_time[i - 1] = virtq->err_time[i];
		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
	}
	pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

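/*
 * Create the device error event channel, make its file descriptor
 * non-blocking and register it as an external interrupt handler.
 */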
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;
	int flags;

	/* Setup device event channel. */
	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
	if (!priv->err_chnl) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create device event channel %d.",
			rte_errno);
		goto error;
	}
	flags = fcntl(priv->err_chnl->fd, F_GETFL);
	ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change device event channel FD.");
		goto error;
	}
	priv->err_intr_handle.fd = priv->err_chnl->fd;
	priv->err_intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->err_intr_handle,
				       mlx5_vdpa_err_interrupt_handler,
				       priv)) {
		priv->err_intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
			priv->vid);
		goto error;
	} else {
		DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
			priv->vid);
	}
	return 0;
error:
	mlx5_vdpa_err_event_unset(priv);
	return -1;
}

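/*
 * Unregister the error interrupt handler (retrying while the callback is
 * busy), drain any pending events and destroy the error event channel.
 */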
void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (!priv->err_intr_handle.fd)
		return;
	while (retries-- && ret == -EAGAIN) {
		ret = rte_intr_callback_unregister(&priv->err_intr_handle,
					    mlx5_vdpa_err_interrupt_handler,
					    priv);
		if (ret == -EAGAIN) {
			DRV_LOG(DEBUG, "Try again to unregister fd %d "
				"of error interrupt, retries = %d.",
				priv->err_intr_handle.fd, retries);
			rte_pause();
		}
	}
	memset(&priv->err_intr_handle, 0, sizeof(priv->err_intr_handle));
	if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
				    128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->err_chnl,
		       &out.event_resp, sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
#endif
		mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
		priv->err_chnl = NULL;
	}
}

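/*
 * Start the CQE event handling thread with SCHED_RR priority, pinned either
 * to the configured event core or to the main lcore CPU set.
 */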
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;
	rte_cpuset_t cpuset;
	pthread_attr_t attr;
	char name[16];
	const struct sched_param sp = {
		.sched_priority = sched_get_priority_max(SCHED_RR),
	};

	if (!priv->eventc)
		/* All virtqs are in poll mode. */
		return 0;
	pthread_attr_init(&attr);
	ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
	if (ret) {
		DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
		return -1;
	}
	ret = pthread_attr_setschedparam(&attr, &sp);
	if (ret) {
		DRV_LOG(ERR, "Failed to set thread priority.");
		return -1;
	}
	ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
			     (void *)priv);
	if (ret) {
		DRV_LOG(ERR, "Failed to create timer thread.");
		return -1;
	}
	CPU_ZERO(&cpuset);
	if (priv->event_core != -1)
		CPU_SET(priv->event_core, &cpuset);
	else
		cpuset = rte_lcore_cpuset(rte_get_main_lcore());
	ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
	if (ret) {
		DRV_LOG(ERR, "Failed to set thread affinity.");
		return -1;
	}
	snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
	ret = rte_thread_setname(priv->timer_tid, name);
	if (ret)
		DRV_LOG(DEBUG, "Cannot set timer thread name.");
	return 0;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	void *status;

	if (priv->timer_tid) {
		pthread_cancel(priv->timer_tid);
		pthread_join(priv->timer_tid, &status);
	}
	priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	mlx5_devx_qp_destroy(&eqp->sw_qp);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}

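/*
 * Move the FW QP and the SW QP from RESET to RTS, each one using the other
 * QP number as its remote peer (loopback pair).
 */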
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
			MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
			MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}

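/*
 * Create an event QP: a CQ plus a FW QP and a SW DevX QP connected to each
 * other as a loopback pair. The SW QP has a receive queue only; its receive
 * doorbell record is initially set to the full ring size, and completions on
 * its CQ are what the event thread polls above.
 */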
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			  int callfd, struct mlx5_vdpa_event_qp *eqp)
{
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	uint32_t ret;

	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -1;
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
		return -1;
	attr.pd = priv->pdn;
	attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	attr.uar_index = priv->uar->page_id;
	attr.cqn = eqp->cq.cq_obj.cq->id;
	attr.rq_size = RTE_BIT32(log_desc_n);
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.sq_size = 0; /* No need SQ. */
	attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
	ret = mlx5_devx_qp_create(priv->ctx, &(eqp->sw_qp), log_desc_n, &attr,
				  SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	/* First ringing. */
	rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
			&eqp->sw_qp.db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}