drivers/vdpa/mlx5/mlx5_vdpa_event.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u

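/* Release the global DevX event channel and UAR shared by all event queues. */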
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
        if (priv->uar) {
                mlx5_glue->devx_free_uar(priv->uar);
                priv->uar = NULL;
        }
#ifdef HAVE_IBV_DEVX_EVENT
        if (priv->eventc) {
                mlx5_os_devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
#endif
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
        if (priv->eventc)
                return 0;
        priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create event channel %d.",
                        rte_errno);
                goto error;
        }
        /*
         * This PMD always issues a write memory barrier before writing
         * to UAR registers, so it is safe to allocate the UAR with any
         * memory mapping type.
         */
        priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
        if (!priv->uar) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to allocate UAR.");
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_event_qp_global_release(priv);
        return -1;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
        mlx5_devx_cq_destroy(&cq->cq_obj);
        memset(cq, 0, sizeof(*cq));
}

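/*
 * Arm the CQ for the next completion event: update the arm doorbell
 * record and then write the arming command to the UAR CQ doorbell
 * register, so HW raises an event on the channel when a new CQE arrives.
 */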
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
        uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
        uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
        uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
        uint64_t db_be = rte_cpu_to_be_64(doorbell);
        uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

        rte_io_wmb();
        cq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
        rte_wmb();
#ifdef RTE_ARCH_64
        *(uint64_t *)addr = db_be;
#else
        *(uint32_t *)addr = db_be;
        rte_io_wmb();
        *((uint32_t *)addr + 1) = db_be >> 32;
#endif
        cq->arm_sn++;
        cq->armed = 1;
}

static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
{
        struct mlx5_devx_cq_attr attr = {
                .use_first_only = 1,
                .uar_page_id = priv->uar->page_id,
        };
        uint16_t event_nums[1] = {0};
        int ret;

        ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
                                  &attr, SOCKET_ID_ANY);
        if (ret)
                goto error;
        cq->cq_ci = 0;
        cq->log_desc_n = log_desc_n;
        rte_spinlock_init(&cq->sl);
        /* Subscribe CQ event to the event channel controlled by the driver. */
        ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
                                                cq->cq_obj.cq->obj,
                                                sizeof(event_nums), event_nums,
                                                (uint64_t)(uintptr_t)cq);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
                goto error;
        }
        cq->callfd = callfd;
        /* Mark the first CQE as HW-owned: no completion at start. */
        cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
        cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
error:
        mlx5_vdpa_cq_destroy(cq);
        return -1;
}

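/*
 * Poll the collapsed CQ (use_first_only): the single CQE is overwritten
 * in place, so the number of new completions is derived from the CQE
 * WQE counter. Advance the CQ consumer index and ring both the CQ and
 * the SW QP doorbell records accordingly. Returns the number of
 * completions found.
 */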
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
        union {
                struct {
                        uint16_t wqe_counter;
                        uint8_t rsvd5;
                        uint8_t op_own;
                };
                uint32_t word;
        } last_word;
        uint16_t next_wqe_counter = cq->cq_ci;
        uint16_t cur_wqe_counter;
        uint16_t comp;

        last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
        cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
        comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
        if (comp) {
                cq->cq_ci += comp;
                MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
                            MLX5_CQE_INVALID);
                if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_RESP_ERR ||
                               MLX5_CQE_OPCODE(last_word.op_own) ==
                               MLX5_CQE_REQ_ERR)))
                        cq->errors++;
                rte_io_wmb();
                /* Ring CQ doorbell record. */
                cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
                rte_io_wmb();
                /* Ring SW QP doorbell record. */
                eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
        }
        return comp;
}

static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_vdpa_cq *cq;
        int i;

        for (i = 0; i < priv->nr_virtqs; i++) {
                cq = &priv->virtqs[i].eqp.cq;
                if (cq->cq_obj.cq && !cq->armed)
                        mlx5_vdpa_cq_arm(priv, cq);
        }
}

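/*
 * Sleep between polling iterations. In dynamic timer mode the delay is
 * adapted to the traffic level: it grows by event_us when no completion
 * was found and shrinks (divided by the completion count) under load.
 * A zero delay yields the CPU instead of sleeping.
 */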
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
        if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
                switch (max) {
                case 0:
                        priv->timer_delay_us += priv->event_us;
                        break;
                case 1:
                        break;
                default:
                        priv->timer_delay_us /= max;
                        break;
                }
        }
        if (priv->timer_delay_us)
                usleep(priv->timer_delay_us);
        else
                /* Yield the CPU to improve scheduling of the polling threads. */
                sched_yield();
}

/* Notify the virtio device of new traffic on a specific virtq. */
static uint32_t
mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
{
        uint32_t comp = 0;

        if (cq->cq_obj.cq) {
                comp = mlx5_vdpa_cq_poll(cq);
                if (comp) {
                        if (cq->callfd != -1)
                                eventfd_write(cq->callfd, (eventfd_t)1);
                        cq->armed = 0;
                }
        }
        return comp;
}

/* Notify the virtio device of new traffic on any virtq. */
static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
{
        int i;
        uint32_t max = 0;

        for (i = 0; i < priv->nr_virtqs; i++) {
                struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
                uint32_t comp = mlx5_vdpa_queue_complete(cq);

                if (comp > max)
                        max = comp;
        }
        return max;
}

/* Wait on the event channel for a completion event from any CQ. */
static struct mlx5_vdpa_cq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                            sizeof(out.buf));

        if (ret >= 0)
                return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
        DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
                ret, errno);
#endif
        return NULL;
}

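/*
 * Event handling thread. In the timer modes it polls all the virtq CQs
 * with an adaptive delay, and when no traffic is seen for no_traffic_max
 * iterations it arms the CQs and blocks on the event channel until a
 * completion event arrives. In interrupt-only mode it just waits for CQ
 * events and re-arms the CQ after each completion.
 */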
static void *
mlx5_vdpa_event_handle(void *arg)
{
        struct mlx5_vdpa_priv *priv = arg;
        struct mlx5_vdpa_cq *cq;
        uint32_t max;

        switch (priv->event_mode) {
        case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
        case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
                priv->timer_delay_us = priv->event_us;
                while (1) {
                        pthread_mutex_lock(&priv->vq_config_lock);
                        max = mlx5_vdpa_queues_complete(priv);
                        if (max == 0 && priv->no_traffic_counter++ >=
                            priv->no_traffic_max) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
                                do {
                                        pthread_mutex_unlock
                                                        (&priv->vq_config_lock);
                                        cq = mlx5_vdpa_event_wait(priv);
                                        pthread_mutex_lock
                                                        (&priv->vq_config_lock);
                                        if (cq == NULL ||
                                               mlx5_vdpa_queue_complete(cq) > 0)
                                                break;
                                } while (1);
                                priv->timer_delay_us = priv->event_us;
                                priv->no_traffic_counter = 0;
                        } else if (max != 0) {
                                priv->no_traffic_counter = 0;
                        }
                        pthread_mutex_unlock(&priv->vq_config_lock);
                        mlx5_vdpa_timer_sleep(priv, max);
                }
                return NULL;
        case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
                do {
                        cq = mlx5_vdpa_event_wait(priv);
                        if (cq != NULL) {
                                pthread_mutex_lock(&priv->vq_config_lock);
                                if (mlx5_vdpa_queue_complete(cq) > 0)
                                        mlx5_vdpa_cq_arm(priv, cq);
                                pthread_mutex_unlock(&priv->vq_config_lock);
                        }
                } while (1);
                return NULL;
        default:
                return NULL;
        }
}

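/*
 * Error interrupt handler: drain the device error event channel and, for
 * each reported virtq error, query the error details, stop the virtq and
 * try to re-enable it. If errors keep repeating within
 * MLX5_VDPA_ERROR_TIME_SEC, give up on recovery.
 */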
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        struct mlx5_vdpa_priv *priv = cb_arg;
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        uint32_t vq_index, i, version;
        struct mlx5_vdpa_virtq *virtq;
        uint64_t sec;

        pthread_mutex_lock(&priv->vq_config_lock);
        while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                vq_index = out.event_resp.cookie & UINT32_MAX;
                version = out.event_resp.cookie >> 32;
                if (vq_index >= priv->nr_virtqs) {
                        DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
                                priv->vdev->device->name, vq_index);
                        continue;
                }
                virtq = &priv->virtqs[vq_index];
                if (!virtq->enable || virtq->version != version)
                        continue;
                if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
                        continue;
                virtq->stopped = true;
                /* Query error info. */
                if (mlx5_vdpa_virtq_query(priv, vq_index))
                        goto log;
                /* Disable vq. */
                if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
                        DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
                        goto log;
                }
                /*
                 * Retry only if the oldest logged error is older than
                 * MLX5_VDPA_ERROR_TIME_SEC, i.e. errors are not repeating
                 * too fast.
                 */
                sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
                if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
                        /* Retry. */
                        if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
                                DRV_LOG(ERR, "Failed to enable virtq %d.",
                                        vq_index);
                        else
                                DRV_LOG(WARNING, "Recover virtq %d: %u.",
                                        vq_index, ++virtq->n_retry);
                } else {
                        /* Too many errors within the time window, give up. */
                        DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
                                priv->vdev->device->name, vq_index);
                }
log:
                /* Shift the error-time log and append the current time. */
                for (i = 1; i < RTE_DIM(virtq->err_time); i++)
                        virtq->err_time[i - 1] = virtq->err_time[i];
                virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
        }
        pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

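/*
 * Set up the device error handling: create a DevX event channel for
 * device error events, make its FD non-blocking and register it as an
 * external interrupt handler.
 */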
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        int flags;

        /* Set up the device event channel. */
        priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
                                                              0);
        if (!priv->err_chnl) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create device event channel %d.",
                        rte_errno);
                goto error;
        }
        flags = fcntl(priv->err_chnl->fd, F_GETFL);
        ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                DRV_LOG(ERR, "Failed to change device event channel FD.");
                goto error;
        }

        if (rte_intr_fd_set(priv->err_intr_handle, priv->err_chnl->fd))
                goto error;

        if (rte_intr_type_set(priv->err_intr_handle, RTE_INTR_HANDLE_EXT))
                goto error;

        if (rte_intr_callback_register(priv->err_intr_handle,
                                       mlx5_vdpa_err_interrupt_handler,
                                       priv)) {
                rte_intr_fd_set(priv->err_intr_handle, 0);
                DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
                        priv->vid);
                goto error;
        } else {
                DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
                        priv->vid);
        }
        return 0;
error:
        mlx5_vdpa_err_event_unset(priv);
        return -1;
}

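/*
 * Tear down the device error handling: unregister the interrupt callback
 * (retrying while it is busy), drain any pending events and destroy the
 * event channel.
 */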
void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;

        if (!rte_intr_fd_get(priv->err_intr_handle))
                return;
        while (retries-- && ret == -EAGAIN) {
                ret = rte_intr_callback_unregister(priv->err_intr_handle,
                                            mlx5_vdpa_err_interrupt_handler,
                                            priv);
                if (ret == -EAGAIN) {
                        DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                "of error interrupt, retries = %d.",
                                rte_intr_fd_get(priv->err_intr_handle),
                                retries);
                        rte_pause();
                }
        }
        if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
                                    128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->err_chnl,
                       &out.event_resp, sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
#endif
                mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
                priv->err_chnl = NULL;
        }
}

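/*
 * Create the CQE event handling thread with real-time (SCHED_RR)
 * priority and pin it to the configured event core, or to the main
 * lcore core set when no core was configured.
 */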
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        rte_cpuset_t cpuset;
        pthread_attr_t attr;
        char name[16];
        const struct sched_param sp = {
                .sched_priority = sched_get_priority_max(SCHED_RR),
        };

        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
        pthread_attr_init(&attr);
        ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
                return -1;
        }
        ret = pthread_attr_setschedparam(&attr, &sp);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread priority.");
                return -1;
        }
        ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
                             (void *)priv);
        if (ret) {
                DRV_LOG(ERR, "Failed to create timer thread.");
                return -1;
        }
        CPU_ZERO(&cpuset);
        if (priv->event_core != -1)
                CPU_SET(priv->event_core, &cpuset);
        else
                cpuset = rte_lcore_cpuset(rte_get_main_lcore());
        ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread affinity.");
                return -1;
        }
        snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
        ret = rte_thread_setname(priv->timer_tid, name);
        if (ret)
                DRV_LOG(DEBUG, "Cannot set timer thread name.");
        return 0;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
        void *status;

        if (priv->timer_tid) {
                pthread_cancel(priv->timer_tid);
                pthread_join(priv->timer_tid, &status);
        }
        priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
        mlx5_devx_qp_destroy(&eqp->sw_qp);
        if (eqp->fw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
        mlx5_vdpa_cq_destroy(&eqp->cq);
        memset(eqp, 0, sizeof(*eqp));
}

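/*
 * Move the loopback-connected FW and SW QPs through the
 * RESET->INIT->RTR->RTS state transitions so they can pass traffic to
 * each other.
 */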
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        return 0;
}

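/*
 * Create the event QP for a virtq: a CQ subscribed to the driver event
 * channel plus FW and SW QPs connected back to back and moved to the RTS
 * state, followed by an initial SW QP doorbell ring.
 */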
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                          int callfd, struct mlx5_vdpa_event_qp *eqp)
{
        struct mlx5_devx_qp_attr attr = {0};
        uint16_t log_desc_n = rte_log2_u32(desc_n);
        uint32_t ret;

        if (mlx5_vdpa_event_qp_global_prepare(priv))
                return -1;
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->cdev->pdn;
        attr.ts_format =
                mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
                goto error;
        }
        attr.uar_index = priv->uar->page_id;
        attr.cqn = eqp->cq.cq_obj.cq->id;
        attr.rq_size = RTE_BIT32(log_desc_n);
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.sq_size = 0; /* No SQ is needed. */
        attr.ts_format =
                mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp), log_desc_n,
                                  &attr, SOCKET_ID_ANY);
        if (ret) {
                DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
                goto error;
        }
        if (mlx5_vdpa_qps2rts(eqp))
                goto error;
        /* First doorbell ring. */
        rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
                        &eqp->sw_qp.db_rec[0]);
        return 0;
error:
        mlx5_vdpa_event_qp_destroy(eqp);
        return -1;
}