net/mlx5: fix UAR used by ASO queues
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_age.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  */
4 #include <mlx5_prm.h>
5 #include <rte_malloc.h>
6 #include <rte_cycles.h>
7
8 #include <mlx5_malloc.h>
9 #include <mlx5_common_os.h>
10
11 #include "mlx5.h"
12 #include "mlx5_flow.h"
13
14 /**
15  * Destroy Completion Queue used for ASO access.
16  *
17  * @param[in] cq
18  *   ASO CQ to destroy.
19  */
20 static void
21 mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
22 {
23         if (cq->cq)
24                 claim_zero(mlx5_devx_cmd_destroy(cq->cq));
25         if (cq->umem_obj)
26                 claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
27         if (cq->umem_buf)
28                 mlx5_free((void *)(uintptr_t)cq->umem_buf);
29         memset(cq, 0, sizeof(*cq));
30 }
31
32 /**
33  * Create Completion Queue used for ASO access.
34  *
35  * @param[in] ctx
36  *   Context returned from mlx5 open_device() glue function.
37  * @param[in/out] cq
38  *   Pointer to CQ to create.
39  * @param[in] log_desc_n
40  *   Log of number of descriptors in queue.
41  * @param[in] socket
42  *   Socket to use for allocation.
43  * @param[in] uar_page_id
44  *   UAR page ID to use.
45  * @param[in] eqn
46  *   EQ number.
47  *
48  * @return
49  *   0 on success, a negative errno value otherwise and rte_errno is set.
50  */
51 static int
52 mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
53                    int socket, int uar_page_id, uint32_t eqn)
54 {
55         struct mlx5_devx_cq_attr attr = { 0 };
56         size_t pgsize = sysconf(_SC_PAGESIZE);
57         uint32_t umem_size;
58         uint16_t cq_size = 1 << log_desc_n;
59
60         cq->log_desc_n = log_desc_n;
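           /* One umem: CQE ring followed by two doorbell record words. */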
61         umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
62         cq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
63                                    4096, socket);
64         if (!cq->umem_buf) {
65                 DRV_LOG(ERR, "Failed to allocate memory for CQ.");
66                 rte_errno = ENOMEM;
67                 return -ENOMEM;
68         }
69         cq->umem_obj = mlx5_glue->devx_umem_reg(ctx,
70                                                 (void *)(uintptr_t)cq->umem_buf,
71                                                 umem_size,
72                                                 IBV_ACCESS_LOCAL_WRITE);
73         if (!cq->umem_obj) {
74                 DRV_LOG(ERR, "Failed to register umem for ASO CQ.");
75                 goto error;
76         }
77         attr.q_umem_valid = 1;
78         attr.db_umem_valid = 1;
79         attr.use_first_only = 0;
80         attr.overrun_ignore = 0;
81         attr.uar_page_id = uar_page_id;
82         attr.q_umem_id = mlx5_os_get_umem_id(cq->umem_obj);
83         attr.q_umem_offset = 0;
84         attr.db_umem_id = attr.q_umem_id;
85         attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
86         attr.eqn = eqn;
87         attr.log_cq_size = log_desc_n;
88         attr.log_page_size = rte_log2_u32(pgsize);
89         cq->cq = mlx5_devx_cmd_create_cq(ctx, &attr);
90         if (!cq->cq)
91                 goto error;
92         cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
93         cq->cq_ci = 0;
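           /* Init CQEs to 0xFF so they read as invalid (HW owned). */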
94         memset((void *)(uintptr_t)cq->umem_buf, 0xFF, attr.db_umem_offset);
95         return 0;
96 error:
97         mlx5_aso_cq_destroy(cq);
98         return -1;
99 }
100
101 /**
102  * Free MR resources.
103  *
104  * @param[in] mr
105  *   MR to free.
106  */
107 static void
108 mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
109 {
110         claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
111         if (!mr->is_indirect && mr->umem)
112                 claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
113         mlx5_free(mr->buf);
114         memset(mr, 0, sizeof(*mr));
115 }
116
117 /**
118  * Register Memory Region.
119  *
120  * @param[in] ctx
121  *   Context returned from mlx5 open_device() glue function.
122  * @param[in] length
123  *   Size of MR buffer.
124  * @param[in/out] mr
125  *   Pointer to MR to create.
126  * @param[in] socket
127  *   Socket to use for allocation.
128  * @param[in] pdn
129  *   Protection Domain number to use.
130  *
131  * @return
132  *   0 on success, a negative errno value otherwise and rte_errno is set.
133  */
134 static int
135 mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
136                      int socket, int pdn)
137 {
138         struct mlx5_devx_mkey_attr mkey_attr;
139
140         mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
141                               socket);
142         if (!mr->buf) {
143                 DRV_LOG(ERR, "Failed to allocate ASO bits memory for DevX MR.");
144                 return -1;
145         }
146         mr->umem = mlx5_glue->devx_umem_reg(ctx, mr->buf, length,
147                                                  IBV_ACCESS_LOCAL_WRITE);
148         if (!mr->umem) {
149                 DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
150                 goto error;
151         }
152         mkey_attr.addr = (uintptr_t)mr->buf;
153         mkey_attr.size = length;
154         mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
155         mkey_attr.pd = pdn;
156         mkey_attr.pg_access = 1;
157         mkey_attr.klm_array = NULL;
158         mkey_attr.klm_num = 0;
159         mkey_attr.relaxed_ordering_read = 0;
160         mkey_attr.relaxed_ordering_write = 0;
161         mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
162         if (!mr->mkey) {
163                 DRV_LOG(ERR, "Failed to create direct Mkey.");
164                 goto error;
165         }
166         mr->length = length;
167         mr->is_indirect = false;
168         return 0;
169 error:
170         if (mr->umem)
171                 claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
172         mlx5_free(mr->buf);
173         return -1;
174 }
175
176 /**
177  * Destroy Send Queue used for ASO access.
178  *
179  * @param[in] sq
180  *   ASO SQ to destroy.
181  */
182 static void
183 mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
184 {
185         if (sq->wqe_umem) {
186                 mlx5_glue->devx_umem_dereg(sq->wqe_umem);
187                 sq->wqe_umem = NULL;
188         }
189         if (sq->umem_buf) {
190                 mlx5_free((void *)(uintptr_t)sq->umem_buf);
191                 sq->umem_buf = NULL;
192         }
193         if (sq->sq) {
194                 mlx5_devx_cmd_destroy(sq->sq);
195                 sq->sq = NULL;
196         }
197         if (sq->cq.cq)
198                 mlx5_aso_cq_destroy(&sq->cq);
199         mlx5_aso_devx_dereg_mr(&sq->mr);
200         memset(sq, 0, sizeof(*sq));
201 }
202
203 /**
204  * Initialize Send Queue used for ASO access.
205  *
206  * @param[in] sq
207  *   ASO SQ to initialize.
208  */
209 static void
210 mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
211 {
212         volatile struct mlx5_aso_wqe *restrict wqe;
213         int i;
214         int size = 1 << sq->log_desc_n;
215         uint64_t addr;
216
217         /* The following fields keep constant values in all WQEs. */
218         for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) {
219                 wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
220                                                           (sizeof(*wqe) >> 4));
221                 wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
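                    /*
                     * Each WQE reads the hit bits of one pool into its own
                     * MR slice, MLX5_ASO_AGE_ACTIONS_PER_POOL bits in total.
                     */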
222                 addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
223                                             MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
224                 wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
225                 wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
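                    /*
                     * Unconditional access: both condition operands are
                     * ALWAYS_TRUE and the full 64-byte data mask is used.
                     */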
226                 wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
227                         (0u |
228                          (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
229                          (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
230                          (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
231                          (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
232                 wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
233         }
234 }
235
236 /**
237  * Create Send Queue used for ASO access.
238  *
239  * @param[in] ctx
240  *   Context returned from mlx5 open_device() glue function.
241  * @param[in/out] sq
242  *   Pointer to SQ to create.
243  * @param[in] socket
244  *   Socket to use for allocation.
245  * @param[in] uar
246  *   User Access Region object.
247  * @param[in] pdn
248  *   Protection Domain number to use.
249  * @param[in] eqn
250  *   EQ number.
251  * @param[in] log_desc_n
252  *   Log of number of descriptors in queue.
253  *
254  * @return
255  *   0 on success, a negative errno value otherwise and rte_errno is set.
256  */
257 static int
258 mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
259                    struct mlx5dv_devx_uar *uar, uint32_t pdn,
260                    uint32_t eqn, uint16_t log_desc_n)
261 {
262         struct mlx5_devx_create_sq_attr attr = { 0 };
263         struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
264         size_t pgsize = sysconf(_SC_PAGESIZE);
265         struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
266         uint32_t sq_desc_n = 1 << log_desc_n;
267         uint32_t wq_size = sizeof(struct mlx5_aso_wqe) * sq_desc_n;
268         int ret;
269
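            /*
             * The MR holds one hit-bits slice of
             * MLX5_ASO_AGE_ACTIONS_PER_POOL / 8 bytes per WQE.
             */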
270         if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
271                                  sq_desc_n, &sq->mr, socket, pdn))
272                 return -1;
273         if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
274                                 mlx5_os_get_devx_uar_page_id(uar), eqn))
275                 goto error;
276         sq->log_desc_n = log_desc_n;
277         sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size +
278                                    sizeof(*sq->db_rec) * 2, 4096, socket);
279         if (!sq->umem_buf) {
280                 DRV_LOG(ERR, "Can't allocate wqe buffer.");
281                 rte_errno = ENOMEM;
                    goto error;
282         }
283         sq->wqe_umem = mlx5_glue->devx_umem_reg(ctx,
284                                                 (void *)(uintptr_t)sq->umem_buf,
285                                                 wq_size +
286                                                 sizeof(*sq->db_rec) * 2,
287                                                 IBV_ACCESS_LOCAL_WRITE);
288         if (!sq->wqe_umem) {
289                 DRV_LOG(ERR, "Failed to register umem for SQ.");
290                 rte_errno = ENOMEM;
291                 goto error;
292         }
293         attr.state = MLX5_SQC_STATE_RST;
294         attr.tis_lst_sz = 0;
295         attr.tis_num = 0;
296         attr.user_index = 0xFFFF;
297         attr.cqn = sq->cq.cq->id;
298         wq_attr->uar_page = mlx5_os_get_devx_uar_page_id(uar);
299         wq_attr->pd = pdn;
300         wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
301         wq_attr->log_wq_pg_sz = rte_log2_u32(pgsize);
302         wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
303         wq_attr->wq_umem_offset = 0;
304         wq_attr->wq_umem_valid = 1;
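            /* WQ stride and size are given in 64-byte (2^6) units. */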
305         wq_attr->log_wq_stride = 6;
306         wq_attr->log_wq_sz = rte_log2_u32(wq_size) - 6;
307         wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
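            /* Doorbell record follows the WQ ring in the same umem. */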
308         wq_attr->dbr_addr = wq_size;
309         wq_attr->dbr_umem_valid = 1;
310         sq->sq = mlx5_devx_cmd_create_sq(ctx, &attr);
311         if (!sq->sq) {
312                 DRV_LOG(ERR, "Can't create sq object.");
313                 rte_errno = ENOMEM;
314                 goto error;
315         }
316         modify_attr.state = MLX5_SQC_STATE_RDY;
317         ret = mlx5_devx_cmd_modify_sq(sq->sq, &modify_attr);
318         if (ret) {
319                 DRV_LOG(ERR, "Can't change sq state to ready.");
320                 rte_errno = ENOMEM;
321                 goto error;
322         }
323         sq->ci = 0;
324         sq->pi = 0;
325         sq->sqn = sq->sq->id;
326         sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
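            /* SQ doorbell register is at offset 0x800 of the UAR page. */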
327         sq->uar_addr = (volatile uint64_t *)((uint8_t *)uar->base_addr + 0x800);
328         mlx5_aso_init_sq(sq);
329         return 0;
330 error:
331         mlx5_aso_destroy_sq(sq);
332         return -1;
333 }
334
335 /**
336  * API to create and initialize Send Queue used for ASO access.
337  *
338  * @param[in] sh
339  *   Pointer to shared device context.
340  *
341  * @return
342  *   0 on success, a negative errno value otherwise and rte_errno is set.
343  */
344 int
345 mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
346 {
347         return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
348                                   sh->tx_uar, sh->pdn, sh->eqn,
349                                   MLX5_ASO_QUEUE_LOG_DESC);
350 }
351
352 /**
353  * API to destroy Send Queue used for ASO access.
354  *
355  * @param[in] sh
356  *   Pointer to shared device context.
357  */
358 void
359 mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
360 {
361         mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
362 }
363
364 /**
365  * Write a burst of WQEs to ASO SQ.
366  *
367  * @param[in] mng
368  *   ASO management data, contains the SQ.
369  * @param[in] n
370  *   Index of the last valid pool.
371  *
372  * @return
373  *   Number of WQEs in burst.
374  */
375 static uint16_t
376 mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
377 {
378         volatile struct mlx5_aso_wqe *wqe;
379         struct mlx5_aso_sq *sq = &mng->aso_sq;
380         struct mlx5_aso_age_pool *pool;
381         uint16_t size = 1 << sq->log_desc_n;
382         uint16_t mask = size - 1;
383         uint16_t max;
384         uint16_t start_pi = sq->pi;
385
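            /* Burst is limited by free SQ slots and by pools left to query. */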
386         max = RTE_MIN(size - (uint16_t)(sq->pi - sq->ci), n - sq->next);
387         if (unlikely(!max))
388                 return 0;
389         sq->elts[start_pi & mask].burst_size = max;
390         do {
391                 wqe = &sq->wqes[sq->pi & mask];
392                 rte_prefetch0(&sq->wqes[(sq->pi + 1) & mask]);
393                 /* Fill next WQE. */
394                 rte_spinlock_lock(&mng->resize_sl);
395                 pool = mng->pools[sq->next];
396                 rte_spinlock_unlock(&mng->resize_sl);
397                 sq->elts[sq->pi & mask].pool = pool;
398                 wqe->general_cseg.misc =
399                                 rte_cpu_to_be_32(((struct mlx5_devx_obj *)
400                                                  (pool->flow_hit_aso_obj))->id);
401                 wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
402                                                          MLX5_COMP_MODE_OFFSET);
403                 wqe->general_cseg.opcode = rte_cpu_to_be_32
404                                                 (MLX5_OPCODE_ACCESS_ASO |
405                                                  ASO_OP_MOD_FLOW_HIT << 24 |
406                                                  sq->pi << 9);
407                 sq->pi++;
408                 sq->next++;
409                 max--;
410         } while (max);
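            /* Request a completion only for the last WQE of the burst. */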
411         wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
412                                                          MLX5_COMP_MODE_OFFSET);
413         rte_io_wmb();
414         sq->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi << 1);
415         rte_wmb();
416         *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
417         rte_wmb();
418         return sq->elts[start_pi & mask].burst_size;
419 }
420
421 /**
422  * Debug utility function. Dump contents of error CQE and WQE.
423  *
424  * @param[in] cqe
425  *   Error CQE to dump.
426  * @param[in] wqe
427  *   Error WQE to dump.
428  */
429 static void
430 mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
431 {
432         int i;
433
434         DRV_LOG(ERR, "Error cqe:");
435         for (i = 0; i < 16; i += 4)
436                 DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
437                         cqe[i + 2], cqe[i + 3]);
438         DRV_LOG(ERR, "\nError wqe:");
439         for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
440                 DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
441                         wqe[i + 2], wqe[i + 3]);
442 }
443
444 /**
445  * Handle case of error CQE.
446  *
447  * @param[in] sq
448  *   ASO SQ to use.
449  */
450 static void
451 mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
452 {
453         struct mlx5_aso_cq *cq = &sq->cq;
454         uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
455         volatile struct mlx5_err_cqe *cqe =
456                                 (volatile struct mlx5_err_cqe *)&cq->cqes[idx];
457
458         cq->errors++;
459         idx = rte_be_to_cpu_16(cqe->wqe_counter) & ((1u << sq->log_desc_n) - 1);
460         mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
461                                  (volatile uint32_t *)&sq->wqes[idx]);
462 }
463
464 /**
465  * Update ASO objects upon completion.
466  *
467  * @param[in] sh
468  *   Shared device context.
469  * @param[in] n
470  *   Number of completed ASO objects.
471  */
472 static void
473 mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
474 {
475         struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
476         struct mlx5_aso_sq *sq = &mng->aso_sq;
477         struct mlx5_age_info *age_info;
478         const uint16_t size = 1 << sq->log_desc_n;
479         const uint16_t mask = size - 1;
480         const uint64_t curr = MLX5_CURR_TIME_SEC;
481         uint16_t expected = AGE_CANDIDATE;
482         uint16_t i;
483
484         for (i = 0; i < n; ++i) {
485                 uint16_t idx = (sq->ci + i) & mask;
486                 struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
487                 uint64_t diff = curr - pool->time_of_last_age_check;
488                 uint64_t *addr = sq->mr.buf;
489                 int j;
490
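                    /* Hit bits written back for this pool's WQE. */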
491                 addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
492                 pool->time_of_last_age_check = curr;
493                 for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
494                         struct mlx5_aso_age_action *act = &pool->actions[j];
495                         struct mlx5_age_param *ap = &act->age_params;
496                         uint8_t byte;
497                         uint8_t offset;
498                         uint8_t *u8addr;
499                         uint8_t hit;
500
501                         if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
502                                             AGE_CANDIDATE)
503                                 continue;
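                            /*
                             * The 64B ASO data is big-endian: action j's
                             * hit bit is bit (j % 8) of byte (63 - j / 8).
                             */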
504                         byte = 63 - (j / 8);
505                         offset = j % 8;
506                         u8addr = (uint8_t *)addr;
507                         hit = (u8addr[byte] >> offset) & 0x1;
508                         if (hit) {
509                                 __atomic_store_n(&ap->sec_since_last_hit, 0,
510                                                  __ATOMIC_RELAXED);
511                         } else {
512                                 struct mlx5_priv *priv;
513
514                                 __atomic_fetch_add(&ap->sec_since_last_hit,
515                                                    diff, __ATOMIC_RELAXED);
516                                 /* If timeout passed add to aged-out list. */
517                                 if (ap->sec_since_last_hit <= ap->timeout)
518                                         continue;
519                                 priv =
520                                 rte_eth_devices[ap->port_id].data->dev_private;
521                                 age_info = GET_PORT_AGE_INFO(priv);
522                                 rte_spinlock_lock(&age_info->aged_sl);
523                                 if (__atomic_compare_exchange_n(&ap->state,
524                                                                 &expected,
525                                                                 AGE_TMOUT,
526                                                                 false,
527                                                                __ATOMIC_RELAXED,
528                                                             __ATOMIC_RELAXED)) {
529                                         LIST_INSERT_HEAD(&age_info->aged_aso,
530                                                          act, next);
531                                         MLX5_AGE_SET(age_info,
532                                                      MLX5_AGE_EVENT_NEW);
533                                 }
534                                 rte_spinlock_unlock(&age_info->aged_sl);
535                         }
536                 }
537         }
538         mlx5_age_event_prepare(sh);
539 }
540
541 /**
542  * Handle completions from WQEs sent to ASO SQ.
543  *
544  * @param[in] sh
545  *   Shared device context.
546  *
547  * @return
548  *   Number of CQEs handled.
549  */
550 static uint16_t
551 mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
552 {
553         struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
554         struct mlx5_aso_sq *sq = &mng->aso_sq;
555         struct mlx5_aso_cq *cq = &sq->cq;
556         volatile struct mlx5_cqe *restrict cqe;
557         const unsigned int cq_size = 1 << cq->log_desc_n;
558         const unsigned int mask = cq_size - 1;
559         uint32_t idx;
560         uint32_t next_idx = cq->cq_ci & mask;
561         const uint16_t max = (uint16_t)(sq->pi - sq->ci);
562         uint16_t i = 0;
563         int ret;
564         if (unlikely(!max))
565                 return 0;
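            /* Handle CQEs until one still owned by HW is found. */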
566         do {
567                 idx = next_idx;
568                 next_idx = (cq->cq_ci + 1) & mask;
569                 rte_prefetch0(&cq->cqes[next_idx]);
570                 cqe = &cq->cqes[idx];
571                 ret = check_cqe(cqe, cq_size, cq->cq_ci);
572                 /*
573                  * Be sure owner read is done before any other cookie field or
574                  * opaque field.
575                  */
576                 rte_io_rmb();
577                 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
578                         if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
579                                 break;
580                         mlx5_aso_cqe_err_handle(sq);
581                 } else {
582                         i += sq->elts[(sq->ci + i) & mask].burst_size;
583                 }
584                 cq->cq_ci++;
585         } while (1);
586         if (likely(i)) {
587                 mlx5_aso_age_action_update(sh, i);
588                 sq->ci += i;
589                 rte_io_wmb();
590                 cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
591         }
592         return i;
593 }
594
595 /**
596  * Periodically read CQEs and send WQEs to ASO SQ.
597  *
598  * @param[in] arg
599  *   Shared device context containing the ASO SQ.
600  */
601 static void
602 mlx5_flow_aso_alarm(void *arg)
603 {
604         struct mlx5_dev_ctx_shared *sh = arg;
605         struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
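            /* Poll every 100us; wait 1s after a full pass over the pools. */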
606         uint32_t us = 100u;
607         uint16_t n;
608
609         rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
610         n = sh->aso_age_mng->next;
611         rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
612         mlx5_aso_completion_handle(sh);
613         if (sq->next == n) {
614                 /* End of loop: wait 1 second. */
615                 us = US_PER_S;
616                 sq->next = 0;
617         }
618         mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
619         if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
620                 DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
621 }
622
623 /**
624  * API to start ASO access using ASO SQ.
625  *
626  * @param[in] sh
627  *   Pointer to shared device context.
628  *
629  * @return
630  *   0 on success, a negative errno value otherwise and rte_errno is set.
631  */
632 int
633 mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
634 {
635         if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
636                 DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
637                 return -rte_errno;
638         }
639         return 0;
640 }
641
642 /**
643  * API to stop ASO access using ASO SQ.
644  *
645  * @param[in] sh
646  *   Pointer to shared device context.
647  *
648  * @return
649  *   0 on success, a negative errno value otherwise and rte_errno is set.
650  */
651 int
652 mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
653 {
654         int retries = 1024;
655
656         if (!sh->aso_age_mng->aso_sq.sq)
657                 return -EINVAL;
658         rte_errno = 0;
659         while (--retries) {
660                 rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
661                 if (rte_errno != EINPROGRESS)
662                         break;
663                 rte_pause();
664         }
665         return -rte_errno;
666 }