vdpa/mlx5: pre-create virtq at probing time
[dpdk.git] drivers/vdpa/mlx5/mlx5_vdpa_event.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

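/*
 * Time window, in seconds, used by the error interrupt handler both to
 * skip error events raised too early and to decide whether a failing
 * virtq is still worth a recovery retry.
 */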
#define MLX5_VDPA_ERROR_TIME_SEC 3u

void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
        mlx5_devx_uar_release(&priv->uar);
#ifdef HAVE_IBV_DEVX_EVENT
        if (priv->eventc) {
                mlx5_os_devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
#endif
}

/* Prepare all the global resources for all the event objects. */
int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
        priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create event channel %d.",
                        rte_errno);
                goto error;
        }
        if (mlx5_devx_uar_prepare(priv->cdev, &priv->uar) != 0) {
                DRV_LOG(ERR, "Failed to allocate UAR.");
                goto error;
        }
        return 0;
error:
        mlx5_vdpa_event_qp_global_release(priv);
        return -1;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
        mlx5_devx_cq_destroy(&cq->cq_obj);
        memset(cq, 0, sizeof(*cq));
}

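/*
 * Arm the CQ for the next completion event: the doorbell value encodes
 * the arm sequence number and the current consumer index, so the HW
 * raises one event on the event channel when a new CQE is written.
 */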
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
        uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
        uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
        uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
        uint64_t db_be = rte_cpu_to_be_64(doorbell);

        mlx5_doorbell_ring(&priv->uar.cq_db, db_be, doorbell_hi,
                           &cq->cq_obj.db_rec[MLX5_CQ_ARM_DB], 0);
        cq->arm_sn++;
        cq->armed = 1;
}

static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
{
        struct mlx5_devx_cq_attr attr = {
                .use_first_only = 1,
                .uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
        };
        uint16_t event_nums[1] = {0};
        int ret;

        ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
                                  &attr, SOCKET_ID_ANY);
        if (ret)
                goto error;
        cq->cq_ci = 0;
        cq->log_desc_n = log_desc_n;
        rte_spinlock_init(&cq->sl);
        /* Subscribe CQ event to the event channel controlled by the driver. */
        ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
                                                cq->cq_obj.cq->obj,
                                                sizeof(event_nums), event_nums,
                                                (uint64_t)(uintptr_t)cq);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
                goto error;
        }
        cq->callfd = callfd;
        /* Init CQE[0] to all ones so that HW owns the CQ at the start. */
        cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
        cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
error:
        mlx5_vdpa_cq_destroy(cq);
        return -1;
}

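/*
 * Poll a collapsed CQ (use_first_only = 1): the HW overwrites CQE[0] on
 * every completion, so progress is derived from the wqe_counter field
 * of that single CQE instead of iterating over a CQE array.
 */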
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
        union {
                struct {
                        uint16_t wqe_counter;
                        uint8_t rsvd5;
                        uint8_t op_own;
                };
                uint32_t word;
        } last_word;
        uint16_t next_wqe_counter = eqp->qp_pi;
        uint16_t cur_wqe_counter;
        uint16_t comp;

        last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
        cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
        comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
        if (comp) {
                cq->cq_ci += comp;
                MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
                            MLX5_CQE_INVALID);
                if (unlikely(MLX5_CQE_OPCODE(last_word.op_own) ==
                             MLX5_CQE_RESP_ERR ||
                             MLX5_CQE_OPCODE(last_word.op_own) ==
                             MLX5_CQE_REQ_ERR))
                        cq->errors++;
                rte_io_wmb();
                /* Ring CQ doorbell record. */
                cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
                eqp->qp_pi += comp;
                rte_io_wmb();
                /* Ring SW QP doorbell record. */
                eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(eqp->qp_pi + cq_size);
        }
        return comp;
}

static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
        struct mlx5_vdpa_cq *cq;
        int i;

        for (i = 0; i < priv->nr_virtqs; i++) {
                cq = &priv->virtqs[i].eqp.cq;
                if (cq->cq_obj.cq && !cq->armed)
                        mlx5_vdpa_cq_arm(priv, cq);
        }
}

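/*
 * Sleep between polling iterations. In dynamic timer mode the delay
 * grows while there is no traffic and shrinks in proportion to the
 * completion burst size, trading CPU usage for notification latency.
 */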
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
        if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
                switch (max) {
                case 0:
                        priv->timer_delay_us += priv->event_us;
                        break;
                case 1:
                        break;
                default:
                        priv->timer_delay_us /= max;
                        break;
                }
        }
        if (priv->timer_delay_us)
                usleep(priv->timer_delay_us);
        else
                /* Give up the CPU to improve polling threads scheduling. */
                sched_yield();
}

/* Notify the virtio device about new traffic on a specific virtq. */
static uint32_t
mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
{
        uint32_t comp = 0;

        if (cq->cq_obj.cq) {
                comp = mlx5_vdpa_cq_poll(cq);
                if (comp) {
                        if (cq->callfd != -1)
                                eventfd_write(cq->callfd, (eventfd_t)1);
                        cq->armed = 0;
                }
        }
        return comp;
}

/* Notify the virtio device about new traffic on any virtq. */
static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
{
        int i;
        uint32_t max = 0;

        for (i = 0; i < priv->nr_virtqs; i++) {
                struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
                uint32_t comp = mlx5_vdpa_queue_complete(cq);

                if (comp > max)
                        max = comp;
        }
        return max;
}

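/*
 * Complete all pending CQEs and reset the CQ/QP indexes so that the
 * event QPs can be reused with a clean state, then re-arm the CQs.
 */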
void
mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)
{
        unsigned int i;

        for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
                struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;

                mlx5_vdpa_queue_complete(cq);
                if (cq->cq_obj.cq) {
                        cq->cq_obj.cqes[0].wqe_counter =
                                rte_cpu_to_be_16(UINT16_MAX);
                        priv->virtqs[i].eqp.qp_pi = 0;
                        if (!cq->armed)
                                mlx5_vdpa_cq_arm(priv, cq);
                }
        }
}

/* Wait on the event channel shared by all CQs for a completion event. */
static struct mlx5_vdpa_cq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                            sizeof(out.buf));

        if (ret >= 0)
                return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
        DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
                ret, errno);
#endif
        return NULL;
}

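/*
 * Event thread body. In the timer modes it actively polls all CQs and
 * falls back to waiting on the event channel once traffic stops; in
 * interrupt mode it blocks on the event channel and re-arms each CQ
 * after serving it.
 */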
static void *
mlx5_vdpa_event_handle(void *arg)
{
        struct mlx5_vdpa_priv *priv = arg;
        struct mlx5_vdpa_cq *cq;
        uint32_t max;

        switch (priv->event_mode) {
        case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
        case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
                priv->timer_delay_us = priv->event_us;
                while (1) {
                        pthread_mutex_lock(&priv->vq_config_lock);
                        max = mlx5_vdpa_queues_complete(priv);
                        if (max == 0 && priv->no_traffic_counter++ >=
                            priv->no_traffic_max) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
                                do {
                                        pthread_mutex_unlock
                                                        (&priv->vq_config_lock);
                                        cq = mlx5_vdpa_event_wait(priv);
                                        pthread_mutex_lock
                                                        (&priv->vq_config_lock);
                                        if (cq == NULL ||
                                               mlx5_vdpa_queue_complete(cq) > 0)
                                                break;
                                } while (1);
                                priv->timer_delay_us = priv->event_us;
                                priv->no_traffic_counter = 0;
                        } else if (max != 0) {
                                priv->no_traffic_counter = 0;
                        }
                        pthread_mutex_unlock(&priv->vq_config_lock);
                        mlx5_vdpa_timer_sleep(priv, max);
                }
                return NULL;
        case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
                do {
                        cq = mlx5_vdpa_event_wait(priv);
                        if (cq != NULL) {
                                pthread_mutex_lock(&priv->vq_config_lock);
                                if (mlx5_vdpa_queue_complete(cq) > 0)
                                        mlx5_vdpa_cq_arm(priv, cq);
                                pthread_mutex_unlock(&priv->vq_config_lock);
                        }
                } while (1);
                return NULL;
        default:
                return NULL;
        }
}

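/*
 * Device error interrupt handler: reads virtq error events from the
 * error channel, queries and disables the failing virtq, then
 * re-enables it unless errors keep repeating within
 * MLX5_VDPA_ERROR_TIME_SEC.
 */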
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
        struct mlx5_vdpa_priv *priv = cb_arg;
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        uint32_t vq_index, i, version;
        struct mlx5_vdpa_virtq *virtq;
        uint64_t sec;

        pthread_mutex_lock(&priv->vq_config_lock);
        while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
                vq_index = out.event_resp.cookie & UINT32_MAX;
                version = out.event_resp.cookie >> 32;
                if (vq_index >= priv->nr_virtqs) {
                        DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
                                priv->vdev->device->name, vq_index);
                        continue;
                }
                virtq = &priv->virtqs[vq_index];
                if (!virtq->enable || virtq->version != version)
                        continue;
                if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
                        continue;
                virtq->stopped = true;
                /* Query error info. */
                if (mlx5_vdpa_virtq_query(priv, vq_index))
                        goto log;
                /* Disable vq. */
                if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
                        DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
                        goto log;
                }
                /* Retry if fewer than N errors occurred within 3 seconds. */
                sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
                if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
                        /* Retry. */
                        if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
                                DRV_LOG(ERR, "Failed to enable virtq %d.",
                                        vq_index);
                        else
                                DRV_LOG(WARNING, "Recover virtq %d: %u.",
                                        vq_index, ++virtq->n_retry);
                } else {
                        /* Retry timeout, give up. */
                        DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
                                priv->vdev->device->name, vq_index);
                }
log:
                /* Shift the error time log and append the current time. */
                for (i = 1; i < RTE_DIM(virtq->err_time); i++)
                        virtq->err_time[i - 1] = virtq->err_time[i];
                virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
        }
        pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

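/*
 * Create the device error event channel, make its FD non-blocking and
 * register it with the EAL interrupt framework so that
 * mlx5_vdpa_err_interrupt_handler() is called on virtq errors.
 */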
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        int flags;

        /* Setup device event channel. */
        priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
                                                              0);
        if (!priv->err_chnl) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create device event channel %d.",
                        rte_errno);
                goto error;
        }
        flags = fcntl(priv->err_chnl->fd, F_GETFL);
        ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to change device event channel FD.");
                goto error;
        }
        priv->err_intr_handle =
                rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
        if (priv->err_intr_handle == NULL) {
                DRV_LOG(ERR, "Failed to allocate intr_handle.");
                goto error;
        }
        if (rte_intr_fd_set(priv->err_intr_handle, priv->err_chnl->fd))
                goto error;

        if (rte_intr_type_set(priv->err_intr_handle, RTE_INTR_HANDLE_EXT))
                goto error;

        ret = rte_intr_callback_register(priv->err_intr_handle,
                                         mlx5_vdpa_err_interrupt_handler,
                                         priv);
        if (ret != 0) {
                rte_intr_fd_set(priv->err_intr_handle, 0);
                DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
                        priv->vid);
                rte_errno = -ret;
                goto error;
        } else {
                DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
                        priv->vid);
        }
        return 0;
error:
        mlx5_vdpa_err_event_unset(priv);
        return -1;
}

void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
        int retries = MLX5_VDPA_INTR_RETRIES;
        int ret = -EAGAIN;

        if (!rte_intr_fd_get(priv->err_intr_handle))
                return;
        while (retries-- && ret == -EAGAIN) {
                ret = rte_intr_callback_unregister(priv->err_intr_handle,
                                            mlx5_vdpa_err_interrupt_handler,
                                            priv);
                if (ret == -EAGAIN) {
                        DRV_LOG(DEBUG, "Try again to unregister fd %d "
                                "of error interrupt, retries = %d.",
                                rte_intr_fd_get(priv->err_intr_handle),
                                retries);
                        rte_pause();
                }
        }
        if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
                union {
                        struct mlx5dv_devx_async_event_hdr event_resp;
                        uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
                                    128];
                } out;

                /* Clean all pending events. */
                while (mlx5_glue->devx_get_event(priv->err_chnl,
                       &out.event_resp, sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
#endif
                mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
                priv->err_chnl = NULL;
        }
        rte_intr_instance_free(priv->err_intr_handle);
}

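/*
 * Start the CQE event thread with round-robin real-time scheduling,
 * pinned either to the configured event core or to the main lcore CPU
 * set. No thread is needed when all virtqs work in poll mode.
 */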
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
        int ret;
        rte_cpuset_t cpuset;
        pthread_attr_t attr;
        char name[16];
        const struct sched_param sp = {
                .sched_priority = sched_get_priority_max(SCHED_RR),
        };

        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
        pthread_attr_init(&attr);
        ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
                return -1;
        }
        ret = pthread_attr_setschedparam(&attr, &sp);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread priority.");
                return -1;
        }
        ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
                             (void *)priv);
        if (ret) {
                DRV_LOG(ERR, "Failed to create timer thread.");
                return -1;
        }
        CPU_ZERO(&cpuset);
        if (priv->event_core != -1)
                CPU_SET(priv->event_core, &cpuset);
        else
                cpuset = rte_lcore_cpuset(rte_get_main_lcore());
        ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
        if (ret) {
                DRV_LOG(ERR, "Failed to set thread affinity.");
                return -1;
        }
        snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
        ret = rte_thread_setname(priv->timer_tid, name);
        if (ret)
                DRV_LOG(DEBUG, "Cannot set timer thread name.");
        return 0;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
        void *status;

        if (priv->timer_tid) {
                pthread_cancel(priv->timer_tid);
                pthread_join(priv->timer_tid, &status);
        }
        priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
        mlx5_devx_qp_destroy(&eqp->sw_qp);
        if (eqp->fw_qp)
                claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
        mlx5_vdpa_cq_destroy(&eqp->cq);
        memset(eqp, 0, sizeof(*eqp));
}

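/*
 * Move the loopback QP pair through the standard RST -> INIT -> RTR ->
 * RTS state sequence. Each QP is connected to its peer by passing the
 * peer QP number on every transition.
 */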
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
                                          eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
                        rte_errno);
                return -1;
        }
        return 0;
}

static int
mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)
{
        if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_QP_2RST,
                                          eqp->sw_qp.qp->id)) {
                DRV_LOG(ERR, "Failed to modify FW QP to RST state(%u).",
                        rte_errno);
                return -1;
        }
        if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
                        MLX5_CMD_OP_QP_2RST, eqp->fw_qp->id)) {
                DRV_LOG(ERR, "Failed to modify SW QP to RST state(%u).",
                        rte_errno);
                return -1;
        }
        return mlx5_vdpa_qps2rts(eqp);
}

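/*
 * Prepare an event QP: a FW-owned QP connected in loopback to a SW QP
 * that has only an RQ. Device events for the virtq are reported as
 * completions on the SW QP CQ. If resources of a matching size already
 * exist, they are reused and just cycled back to RTS state.
 */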
int
mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                           int callfd, struct mlx5_vdpa_event_qp *eqp)
{
        struct mlx5_devx_qp_attr attr = {0};
        uint16_t log_desc_n = rte_log2_u32(desc_n);
        uint32_t ret;

        if (eqp->cq.cq_obj.cq != NULL && log_desc_n == eqp->cq.log_desc_n) {
                /* Reuse existing resources. */
                eqp->cq.callfd = callfd;
                /* FW will set the event QP to error state on queue destroy. */
                if (!mlx5_vdpa_qps2rst2rts(eqp)) {
                        rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
                                        &eqp->sw_qp.db_rec[0]);
                        return 0;
                }
        }
        if (eqp->fw_qp)
                mlx5_vdpa_event_qp_destroy(eqp);
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->cdev->pdn;
        attr.ts_format =
                mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
                goto error;
        }
        attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);
        attr.cqn = eqp->cq.cq_obj.cq->id;
        attr.num_of_receive_wqes = RTE_BIT32(log_desc_n);
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.num_of_send_wqbbs = 0; /* No SQ is needed. */
        attr.ts_format =
                mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp),
                                        attr.num_of_receive_wqes *
                                        MLX5_WSEG_SIZE, &attr, SOCKET_ID_ANY);
        if (ret) {
                DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
                goto error;
        }
        if (mlx5_vdpa_qps2rts(eqp))
                goto error;
        eqp->qp_pi = 0;
        /* First doorbell ringing. */
        if (eqp->sw_qp.db_rec)
                rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
                        &eqp->sw_qp.db_rec[0]);
        return 0;
error:
        mlx5_vdpa_event_qp_destroy(eqp);
        return -1;
}