[dpdk.git] drivers/vdpa/mlx5/mlx5_vdpa_event.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u

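/* Release the global per-device event resources: the CQ doorbell UAR and,
 * when DevX events are supported, the event channel shared by all CQs.
 */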
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
        mlx5_devx_uar_release(&priv->uar);
#ifdef HAVE_IBV_DEVX_EVENT
        if (priv->eventc) {
                mlx5_os_devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
#endif
}

/* Prepare all the global resources for all the event objects. */
int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
        priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create event channel %d.",
                        rte_errno);
                goto error;
        }
        if (mlx5_devx_uar_prepare(priv->cdev, &priv->uar) != 0) {
                DRV_LOG(ERR, "Failed to allocate UAR.");
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_event_qp_global_release(priv);
        return -1;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
        mlx5_devx_cq_destroy(&cq->cq_obj);
        memset(cq, 0, sizeof(*cq));
}

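/* Arm the CQ for the next completion event: combine the arm sequence
 * number and the CQ consumer index into a doorbell value and ring the CQ
 * doorbell through the UAR.
 */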
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
        uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
        uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
        uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
        uint64_t db_be = rte_cpu_to_be_64(doorbell);

        mlx5_doorbell_ring(&priv->uar.cq_db, db_be, doorbell_hi,
                           &cq->cq_obj.db_rec[MLX5_CQ_ARM_DB], 0);
        cq->arm_sn++;
        cq->armed = 1;
}

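/* Create a CQ with a single overwritten CQE (use_first_only), subscribe
 * it to the driver event channel and arm it for the first event.
 */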
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
{
        struct mlx5_devx_cq_attr attr = {
                .use_first_only = 1,
                .uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
        };
        uint16_t event_nums[1] = {0};
        int ret;

        ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
                                  &attr, SOCKET_ID_ANY);
        if (ret)
                goto error;
        cq->cq_ci = 0;
        cq->log_desc_n = log_desc_n;
        rte_spinlock_init(&cq->sl);
        /* Subscribe CQ event to the event channel controlled by the driver. */
        ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
                                                cq->cq_obj.cq->obj,
                                                sizeof(event_nums), event_nums,
                                                (uint64_t)(uintptr_t)cq);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
                goto error;
        }
        cq->callfd = callfd;
        /* Initialize CQE to all ones so that HW owns it at the start. */
        cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
        cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
error:
        mlx5_vdpa_cq_destroy(cq);
        return -1;
}

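/* Poll the collapsed CQE: the number of new completions is the advance of
 * the CQE WQE counter over the driver CQ index. On completions, update the
 * CQ doorbell record and the SW QP doorbell record to repost the WQEs.
 */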
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
        union {
                struct {
                        uint16_t wqe_counter;
                        uint8_t rsvd5;
                        uint8_t op_own;
                };
                uint32_t word;
        } last_word;
        uint16_t next_wqe_counter = cq->cq_ci;
        uint16_t cur_wqe_counter;
        uint16_t comp;

        last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
        cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
        comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
        if (comp) {
                cq->cq_ci += comp;
                MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
                            MLX5_CQE_INVALID);
                if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_RESP_ERR ||
                               MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_REQ_ERR)))
                        cq->errors++;
                rte_io_wmb();
                /* Ring CQ doorbell record. */
                cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
                rte_io_wmb();
                /* Ring SW QP doorbell record. */
                eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
        }
        return comp;
}

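/* Re-arm all created CQs that are not currently armed. */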
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_vdpa_cq *cq;
        int i;

        for (i = 0; i < priv->nr_virtqs; i++) {
                cq = &priv->virtqs[i].eqp.cq;
                if (cq->cq_obj.cq && !cq->armed)
                        mlx5_vdpa_cq_arm(priv, cq);
        }
}

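/* Adjust the polling timer delay: in dynamic mode the delay grows by
 * event_us when there was no traffic and shrinks in proportion to the last
 * completion burst size. A zero delay yields the CPU instead of sleeping.
 */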
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
        if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
                switch (max) {
                case 0:
                        priv->timer_delay_us += priv->event_us;
                        break;
                case 1:
                        break;
                default:
                        priv->timer_delay_us /= max;
                        break;
                }
        }
        if (priv->timer_delay_us)
                usleep(priv->timer_delay_us);
        else
                /* Give up the CPU to improve scheduling of the polling threads. */
                sched_yield();
}

/* Notify the virtio device about new traffic on a specific virtq. */
static uint32_t
mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
{
        uint32_t comp = 0;

        if (cq->cq_obj.cq) {
                comp = mlx5_vdpa_cq_poll(cq);
                if (comp) {
                        if (cq->callfd != -1)
                                eventfd_write(cq->callfd, (eventfd_t)1);
                        cq->armed = 0;
                }
        }
        return comp;
}

/* Notify the virtio device about new traffic on any virtq. */
static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
{
        int i;
        uint32_t max = 0;

        for (i = 0; i < priv->nr_virtqs; i++) {
                struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
                uint32_t comp = mlx5_vdpa_queue_complete(cq);

                if (comp > max)
                        max = comp;
        }
        return max;
}

/* Wait on the event channel shared by all CQs for a completion event. */
static struct mlx5_vdpa_cq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                            sizeof(out.buf));

        if (ret >= 0)
                return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
        DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
                ret, errno);
#endif
        return NULL;
}

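/* Event polling thread routine. In the timer modes, poll all virtqs in a
 * loop and, after no_traffic_max empty polls, arm all CQs and block on the
 * event channel until traffic resumes. In interrupt mode, only block on
 * the event channel and re-arm each CQ after its completions are handled.
 */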
static void *
mlx5_vdpa_event_handle(void *arg)
{
        struct mlx5_vdpa_priv *priv = arg;
        struct mlx5_vdpa_cq *cq;
        uint32_t max;

        switch (priv->event_mode) {
        case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
        case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
                priv->timer_delay_us = priv->event_us;
                while (1) {
                        pthread_mutex_lock(&priv->vq_config_lock);
                        max = mlx5_vdpa_queues_complete(priv);
                        if (max == 0 && priv->no_traffic_counter++ >=
                            priv->no_traffic_max) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
                                do {
                                        pthread_mutex_unlock
                                                        (&priv->vq_config_lock);
                                        cq = mlx5_vdpa_event_wait(priv);
                                        pthread_mutex_lock
                                                        (&priv->vq_config_lock);
                                        if (cq == NULL ||
                                               mlx5_vdpa_queue_complete(cq) > 0)
                                                break;
                                } while (1);
                                priv->timer_delay_us = priv->event_us;
                                priv->no_traffic_counter = 0;
                        } else if (max != 0) {
                                priv->no_traffic_counter = 0;
                        }
                        pthread_mutex_unlock(&priv->vq_config_lock);
                        mlx5_vdpa_timer_sleep(priv, max);
                }
                return NULL;
        case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
                do {
                        cq = mlx5_vdpa_event_wait(priv);
                        if (cq != NULL) {
                                pthread_mutex_lock(&priv->vq_config_lock);
                                if (mlx5_vdpa_queue_complete(cq) > 0)
                                        mlx5_vdpa_cq_arm(priv, cq);
                                pthread_mutex_unlock(&priv->vq_config_lock);
                        }
                } while (1);
                return NULL;
        default:
                return NULL;
        }
}

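/* Device error interrupt handler: drain all pending error events from the
 * error channel and, for each affected virtq, query the error info, stop
 * the queue and restart it unless errors repeated too often.
 */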
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        struct mlx5_vdpa_priv *priv = cb_arg;
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        uint32_t vq_index, i, version;
        struct mlx5_vdpa_virtq *virtq;
        uint64_t sec;

        pthread_mutex_lock(&priv->vq_config_lock);
        while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                vq_index = out.event_resp.cookie & UINT32_MAX;
                version = out.event_resp.cookie >> 32;
                if (vq_index >= priv->nr_virtqs) {
                        DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
                                priv->vdev->device->name, vq_index);
                        continue;
                }
                virtq = &priv->virtqs[vq_index];
                if (!virtq->enable || virtq->version != version)
                        continue;
                if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
                        continue;
                virtq->stopped = true;
                /* Query error info. */
                if (mlx5_vdpa_virtq_query(priv, vq_index))
                        goto log;
                /* Disable vq. */
                if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
                        DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
                        goto log;
                }
                /* Retry if errors occurred fewer than RTE_DIM(err_time)
                 * times within 3 seconds.
                 */
                sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
                if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
                        /* Retry. */
                        if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
                                DRV_LOG(ERR, "Failed to enable virtq %d.",
                                        vq_index);
                        else
                                DRV_LOG(WARNING, "Recover virtq %d: %u.",
                                        vq_index, ++virtq->n_retry);
                } else {
                        /* Too many errors within 3 seconds, give up. */
                        DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
                                priv->vdev->device->name, vq_index);
                }
log:
                /* Shift the error time log and add the current time at the end. */
                for (i = 1; i < RTE_DIM(virtq->err_time); i++)
                        virtq->err_time[i - 1] = virtq->err_time[i];
                virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
        }
        pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

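/* Create the device error event channel, switch its FD to non-blocking
 * mode and register it as an external interrupt handler.
 */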
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        int flags;

        /* Setup device event channel. */
        priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
                                                              0);
        if (!priv->err_chnl) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create device event channel %d.",
                        rte_errno);
                goto error;
        }
        flags = fcntl(priv->err_chnl->fd, F_GETFL);
        ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to change device event channel FD.");
                goto error;
        }
        priv->err_intr_handle =
                rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
        if (priv->err_intr_handle == NULL) {
                DRV_LOG(ERR, "Failed to allocate intr_handle.");
                goto error;
        }
        if (rte_intr_fd_set(priv->err_intr_handle, priv->err_chnl->fd))
                goto error;

        if (rte_intr_type_set(priv->err_intr_handle, RTE_INTR_HANDLE_EXT))
                goto error;

        ret = rte_intr_callback_register(priv->err_intr_handle,
                                         mlx5_vdpa_err_interrupt_handler,
                                         priv);
        if (ret != 0) {
                rte_intr_fd_set(priv->err_intr_handle, 0);
                DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
                        priv->vid);
                rte_errno = -ret;
                goto error;
        } else {
                DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
                        priv->vid);
        }
        return 0;
error:
        mlx5_vdpa_err_event_unset(priv);
        return -1;
}

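/* Unregister the error interrupt handler, retrying while the callback is
 * still busy, then drain pending events and destroy the error channel.
 */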
void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;

        if (!rte_intr_fd_get(priv->err_intr_handle))
                return;
        while (retries-- && ret == -EAGAIN) {
                ret = rte_intr_callback_unregister(priv->err_intr_handle,
                                            mlx5_vdpa_err_interrupt_handler,
                                            priv);
                if (ret == -EAGAIN) {
                        DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                "of error interrupt, retries = %d.",
                                rte_intr_fd_get(priv->err_intr_handle),
                                retries);
                        rte_pause();
                }
        }
        if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
                                    128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->err_chnl,
                       &out.event_resp, sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
#endif
                mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
                priv->err_chnl = NULL;
        }
        rte_intr_instance_free(priv->err_intr_handle);
}

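/* Start the CQE event thread with the highest SCHED_RR priority and pin it
 * to the configured event core, or to the main lcore CPU set by default.
 */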
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        rte_cpuset_t cpuset;
        pthread_attr_t attr;
        char name[16];
        const struct sched_param sp = {
                .sched_priority = sched_get_priority_max(SCHED_RR),
        };

        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
        pthread_attr_init(&attr);
        ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
                return -1;
        }
        ret = pthread_attr_setschedparam(&attr, &sp);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread priority.");
                return -1;
        }
        ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
                             (void *)priv);
        if (ret) {
                DRV_LOG(ERR, "Failed to create timer thread.");
                return -1;
        }
        CPU_ZERO(&cpuset);
        if (priv->event_core != -1)
                CPU_SET(priv->event_core, &cpuset);
        else
                cpuset = rte_lcore_cpuset(rte_get_main_lcore());
        ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread affinity.");
                return -1;
        }
        snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
        ret = rte_thread_setname(priv->timer_tid, name);
        if (ret)
                DRV_LOG(DEBUG, "Cannot set timer thread name.");
        return 0;
}

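/* Stop the CQE event thread. */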
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
        void *status;

        if (priv->timer_tid) {
                pthread_cancel(priv->timer_tid);
                pthread_join(priv->timer_tid, &status);
        }
        priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
        mlx5_devx_qp_destroy(&eqp->sw_qp);
        if (eqp->fw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
        mlx5_vdpa_cq_destroy(&eqp->cq);
        memset(eqp, 0, sizeof(*eqp));
}

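/* Move the FW QP and the SW QP through the RST->INIT->RTR->RTS sequence,
 * each QP connected to the other as its peer.
 */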
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        return 0;
}

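/* Create the event QP: the CQ and a FW/SW QP pair connected back-to-back,
 * through which the virtq completion events reach the CQ.
 */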
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                          int callfd, struct mlx5_vdpa_event_qp *eqp)
{
        struct mlx5_devx_qp_attr attr = {0};
        uint16_t log_desc_n = rte_log2_u32(desc_n);
        uint32_t ret;

        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->cdev->pdn;
        attr.ts_format =
                mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
                goto error;
        }
        attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);
        attr.cqn = eqp->cq.cq_obj.cq->id;
        attr.num_of_receive_wqes = RTE_BIT32(log_desc_n);
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.num_of_send_wqbbs = 0; /* No SQ is needed. */
        attr.ts_format =
                mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp),
                                        attr.num_of_receive_wqes *
                                        MLX5_WSEG_SIZE, &attr, SOCKET_ID_ANY);
        if (ret) {
                DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
                goto error;
        }
        if (mlx5_vdpa_qps2rts(eqp))
                goto error;
        /* First ringing: post all receive WQEs to HW. */
        rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
                        &eqp->sw_qp.db_rec[0]);
        return 0;
error:
        mlx5_vdpa_event_qp_destroy(eqp);
        return -1;
}