drivers/net/mlx5/mlx5_flow_age.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <unistd.h>	/* sysconf() */

#include <mlx5_prm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>

#include "mlx5.h"
#include "mlx5_flow.h"

/**
 * Destroy Completion Queue used for ASO access.
 *
 * @param[in] cq
 *   ASO CQ to destroy.
 */
static void
mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		mlx5_free((void *)(uintptr_t)cq->umem_buf);
	memset(cq, 0, sizeof(*cq));
}

/**
 * Create Completion Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in,out] cq
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar_page_id
 *   UAR page ID to use.
 * @param[in] eqn
 *   EQ number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
		   int socket, int uar_page_id, uint32_t eqn)
{
	struct mlx5_devx_cq_attr attr = { 0 };
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t umem_size;
	uint16_t cq_size = 1 << log_desc_n;

	cq->log_desc_n = log_desc_n;
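	/*
	 * A single umem holds the CQE ring followed by the two 32-bit
	 * doorbell record words.
	 */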
	umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
	cq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
				   4096, socket);
	if (!cq->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	cq->umem_obj = mlx5_glue->devx_umem_reg(ctx,
						(void *)(uintptr_t)cq->umem_buf,
						umem_size,
						IBV_ACCESS_LOCAL_WRITE);
	if (!cq->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for ASO CQ.");
		goto error;
	}
	attr.q_umem_valid = 1;
	attr.db_umem_valid = 1;
	attr.use_first_only = 0;
	attr.overrun_ignore = 0;
	attr.uar_page_id = uar_page_id;
	attr.q_umem_id = mlx5_os_get_umem_id(cq->umem_obj);
	attr.q_umem_offset = 0;
	attr.db_umem_id = attr.q_umem_id;
	attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
	attr.eqn = eqn;
	attr.log_cq_size = log_desc_n;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->cq = mlx5_devx_cmd_create_cq(ctx, &attr);
	if (!cq->cq)
		goto error;
	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
	cq->cq_ci = 0;
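	/*
	 * Poison the CQE area with 0xFF so every entry reads as invalid
	 * (HW-owned) until the HW writes a real completion.
	 */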
	memset((void *)(uintptr_t)cq->umem_buf, 0xFF, attr.db_umem_offset);
	return 0;
error:
	mlx5_aso_cq_destroy(cq);
	return -1;
}

/**
 * Free MR resources.
 *
 * @param[in] mr
 *   MR to free.
 */
static void
mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
{
	claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
	if (!mr->is_indirect && mr->umem)
		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
	mlx5_free(mr->buf);
	memset(mr, 0, sizeof(*mr));
}

/**
 * Register Memory Region.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] length
 *   Size of MR buffer.
 * @param[in,out] mr
 *   Pointer to MR to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] pdn
 *   Protection Domain number to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
		     int socket, int pdn)
{
	struct mlx5_devx_mkey_attr mkey_attr;

	mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
			      socket);
	if (!mr->buf) {
		DRV_LOG(ERR, "Failed to allocate memory for ASO bitmask MR.");
		return -1;
	}
	mr->umem = mlx5_glue->devx_umem_reg(ctx, mr->buf, length,
					    IBV_ACCESS_LOCAL_WRITE);
	if (!mr->umem) {
		DRV_LOG(ERR, "Failed to register umem for MR by DevX.");
		goto error;
	}
	mkey_attr.addr = (uintptr_t)mr->buf;
	mkey_attr.size = length;
	mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
	mkey_attr.pd = pdn;
	mkey_attr.pg_access = 1;
	mkey_attr.klm_array = NULL;
	mkey_attr.klm_num = 0;
	mkey_attr.relaxed_ordering_read = 0;
	mkey_attr.relaxed_ordering_write = 0;
	mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
	if (!mr->mkey) {
		DRV_LOG(ERR, "Failed to create direct Mkey.");
		goto error;
	}
	mr->length = length;
	mr->is_indirect = false;
	return 0;
error:
	if (mr->umem)
		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
	mlx5_free(mr->buf);
	return -1;
}

/**
 * Destroy Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to destroy.
 */
static void
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (sq->umem_buf) {
		mlx5_free((void *)(uintptr_t)sq->umem_buf);
		sq->umem_buf = NULL;
	}
	if (sq->sq) {
		mlx5_devx_cmd_destroy(sq->sq);
		sq->sq = NULL;
	}
	if (sq->cq.cq)
		mlx5_aso_cq_destroy(&sq->cq);
	mlx5_aso_devx_dereg_mr(&sq->mr);
	memset(sq, 0, sizeof(*sq));
}

/**
 * Initialize Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;
	uint64_t addr;

	/* All the fields initialized below stay constant for the SQ lifetime. */
	for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
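		/*
		 * Each WQE reads back its own slice of the MR: one hit bit
		 * per action in a pool, i.e.
		 * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64 64-bit words per WQE.
		 */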
		addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
				  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
		wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
			(0u |
			 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
		wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
	}
}

/**
 * Create Send Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in,out] sq
 *   Pointer to SQ to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar
 *   User Access Region object.
 * @param[in] pdn
 *   Protection Domain number to use.
 * @param[in] eqn
 *   EQ number.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
		   struct mlx5dv_devx_uar *uar, uint32_t pdn,
		   uint32_t eqn, uint16_t log_desc_n)
{
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
	size_t pgsize = sysconf(_SC_PAGESIZE);
	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
	uint32_t sq_desc_n = 1 << log_desc_n;
	uint32_t wq_size = sizeof(struct mlx5_aso_wqe) * sq_desc_n;
	int ret;

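	/*
	 * The MR holds one hit bitmask per SQ descriptor: one bit per
	 * action, MLX5_ASO_AGE_ACTIONS_PER_POOL / 8 bytes each.
	 */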
	if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
				 sq_desc_n, &sq->mr, socket, pdn))
		return -1;
	if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
			       mlx5_os_get_devx_uar_page_id(uar), eqn))
		goto error;
	sq->log_desc_n = log_desc_n;
	sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size +
				   sizeof(*sq->db_rec) * 2, 4096, socket);
	if (!sq->umem_buf) {
		DRV_LOG(ERR, "Can't allocate WQE buffer.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->wqe_umem = mlx5_glue->devx_umem_reg(ctx,
						(void *)(uintptr_t)sq->umem_buf,
						wq_size +
						sizeof(*sq->db_rec) * 2,
						IBV_ACCESS_LOCAL_WRITE);
	if (!sq->wqe_umem) {
		DRV_LOG(ERR, "Failed to register umem for SQ.");
		rte_errno = ENOMEM;
		goto error;
	}
	attr.state = MLX5_SQC_STATE_RST;
	attr.tis_lst_sz = 0;
	attr.tis_num = 0;
	attr.user_index = 0xFFFF;
	attr.cqn = sq->cq.cq->id;
	wq_attr->uar_page = mlx5_os_get_devx_uar_page_id(uar);
	wq_attr->pd = pdn;
	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
	wq_attr->log_wq_pg_sz = rte_log2_u32(pgsize);
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
	wq_attr->wq_umem_offset = 0;
	wq_attr->wq_umem_valid = 1;
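	/* WQ stride is one 64-byte WQEBB (log2 = 6); each ASO WQE takes two. */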
	wq_attr->log_wq_stride = 6;
	wq_attr->log_wq_sz = rte_log2_u32(wq_size) - 6;
	wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
	wq_attr->dbr_addr = wq_size;
	wq_attr->dbr_umem_valid = 1;
	sq->sq = mlx5_devx_cmd_create_sq(ctx, &attr);
	if (!sq->sq) {
		DRV_LOG(ERR, "Can't create SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	modify_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(sq->sq, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->pi = 0;
	sq->head = 0;
	sq->tail = 0;
	sq->sqn = sq->sq->id;
	sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
	sq->uar_addr = (volatile uint64_t *)((uint8_t *)uar->base_addr + 0x800);
	mlx5_aso_init_sq(sq);
	return 0;
error:
	mlx5_aso_destroy_sq(sq);
	return -1;
}

/**
 * API to create and initialize Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
{
	return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
				  sh->tx_uar, sh->pdn, sh->eqn,
				  MLX5_ASO_QUEUE_LOG_DESC);
}
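
/*
 * A minimal usage sketch (error handling elided); the SQ is created once
 * per shared context and then polled from an EAL alarm:
 *
 *	if (!mlx5_aso_queue_init(sh))
 *		mlx5_aso_queue_start(sh);
 *	...
 *	mlx5_aso_queue_stop(sh);
 *	mlx5_aso_queue_uninit(sh);
 */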

/**
 * API to destroy Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 */
void
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
}

/**
 * Write a burst of WQEs to ASO SQ.
 *
 * @param[in] mng
 *   ASO management data, contains the SQ.
 * @param[in] n
 *   Number of valid pools; the burst stops when the next pool index
 *   reaches it.
 *
 * @return
 *   Number of WQEs in burst.
 */
static uint16_t
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
{
	volatile struct mlx5_aso_wqe *wqe;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_age_pool *pool;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t max;
	uint16_t start_head = sq->head;

	max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
	if (unlikely(!max))
		return 0;
	sq->elts[start_head & mask].burst_size = max;
	do {
		wqe = &sq->wqes[sq->head & mask];
		rte_prefetch0(&sq->wqes[(sq->head + 1) & mask]);
		/* Fill next WQE. */
		rte_spinlock_lock(&mng->resize_sl);
		pool = mng->pools[sq->next];
		rte_spinlock_unlock(&mng->resize_sl);
		sq->elts[sq->head & mask].pool = pool;
		wqe->general_cseg.misc =
				rte_cpu_to_be_32(((struct mlx5_devx_obj *)
						 (pool->flow_hit_aso_obj))->id);
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
						   MLX5_COMP_MODE_OFFSET);
		wqe->general_cseg.opcode = rte_cpu_to_be_32
						(MLX5_OPCODE_ACCESS_ASO |
						 (ASO_OPC_MOD_FLOW_HIT <<
						  WQE_CSEG_OPC_MOD_OFFSET) |
						 (sq->pi <<
						  WQE_CSEG_WQE_INDEX_OFFSET));
		sq->pi += 2; /* Each WQE contains 2 WQEBBs. */
		sq->head++;
		sq->next++;
		max--;
	} while (max);
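	/*
	 * Only the last WQE of the burst requests an unconditional
	 * completion; earlier WQEs complete only on error.
	 */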
	wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					   MLX5_COMP_MODE_OFFSET);
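	/*
	 * Doorbell sequence: publish the WQEs, update the SQ doorbell
	 * record with the new producer index, then ring the UAR register
	 * with the first 8 bytes of the last WQE.
	 */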
	rte_io_wmb();
	sq->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
	rte_wmb();
	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64-bit ARCH. */
	rte_wmb();
	return sq->elts[start_head & mask].burst_size;
}

/**
 * Debug utility function. Dump contents of error CQE and WQE.
 *
 * @param[in] cqe
 *   Error CQE to dump.
 * @param[in] wqe
 *   Error WQE to dump.
 */
static void
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
{
	int i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < 16; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
}

/**
 * Handle case of error CQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */
static void
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
	volatile struct mlx5_err_cqe *cqe =
				(volatile struct mlx5_err_cqe *)&cq->cqes[idx];

	cq->errors++;
	idx = rte_be_to_cpu_16(cqe->wqe_counter) & ((1u << sq->log_desc_n) - 1);
	mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
			       (volatile uint32_t *)&sq->wqes[idx]);
}

/**
 * Update ASO objects upon completion.
 *
 * @param[in] sh
 *   Shared device context.
 * @param[in] n
 *   Number of completed ASO objects.
 */
static void
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_age_info *age_info;
	const uint16_t size = 1 << sq->log_desc_n;
	const uint16_t mask = size - 1;
	const uint64_t curr = MLX5_CURR_TIME_SEC;
	uint16_t expected = AGE_CANDIDATE;
	uint16_t i;

	for (i = 0; i < n; ++i) {
		uint16_t idx = (sq->tail + i) & mask;
		struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
		uint64_t diff = curr - pool->time_of_last_age_check;
		uint64_t *addr = sq->mr.buf;
		int j;

		addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
		pool->time_of_last_age_check = curr;
		for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
			struct mlx5_aso_age_action *act = &pool->actions[j];
			struct mlx5_age_param *ap = &act->age_params;
			uint8_t byte;
			uint8_t offset;
			uint8_t *u8addr;
			uint8_t hit;

			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
					    AGE_CANDIDATE)
				continue;
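			/*
			 * The hit data is laid out back to front: the bit
			 * for action j lives in byte 63 - j / 8, at bit
			 * position j % 8.
			 */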
			byte = 63 - (j / 8);
			offset = j % 8;
			u8addr = (uint8_t *)addr;
			hit = (u8addr[byte] >> offset) & 0x1;
			if (hit) {
				__atomic_store_n(&ap->sec_since_last_hit, 0,
						 __ATOMIC_RELAXED);
			} else {
				struct mlx5_priv *priv;

				__atomic_fetch_add(&ap->sec_since_last_hit,
						   diff, __ATOMIC_RELAXED);
				/* If timeout passed add to aged-out list. */
				if (ap->sec_since_last_hit <= ap->timeout)
					continue;
				priv =
				rte_eth_devices[ap->port_id].data->dev_private;
				age_info = GET_PORT_AGE_INFO(priv);
				rte_spinlock_lock(&age_info->aged_sl);
				/* Re-arm "expected": a failed CAS overwrites it. */
				expected = AGE_CANDIDATE;
				if (__atomic_compare_exchange_n(&ap->state,
								&expected,
								AGE_TMOUT,
								false,
							       __ATOMIC_RELAXED,
							    __ATOMIC_RELAXED)) {
					LIST_INSERT_HEAD(&age_info->aged_aso,
							 act, next);
					MLX5_AGE_SET(age_info,
						     MLX5_AGE_EVENT_NEW);
				}
				rte_spinlock_unlock(&age_info->aged_sl);
			}
		}
	}
	mlx5_age_event_prepare(sh);
}

/**
 * Handle completions from WQEs sent to ASO SQ.
 *
 * @param[in] sh
 *   Shared device context.
 *
 * @return
 *   Number of CQEs handled.
 */
static uint16_t
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	const uint16_t max = (uint16_t)(sq->head - sq->tail);
	uint16_t i = 0;
	int ret;

	if (unlikely(!max))
		return 0;
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cqes[next_idx]);
		cqe = &cq->cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			i += sq->elts[(sq->tail + i) & mask].burst_size;
		}
		cq->cq_ci++;
	} while (1);
	if (likely(i)) {
		mlx5_aso_age_action_update(sh, i);
		sq->tail += i;
		rte_io_wmb();
		cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
	return i;
}

/**
 * Periodically read CQEs and send WQEs to ASO SQ.
 *
 * @param[in] arg
 *   Shared device context containing the ASO SQ.
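 *
 * The alarm re-arms every 100 microseconds while pools remain pending in
 * the current scan cycle, and backs off to one second after a full pass
 * over all pools.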
 */
static void
mlx5_flow_aso_alarm(void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;
	struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
	uint32_t us = 100u;
	uint16_t n;

	rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
	n = sh->aso_age_mng->next;
	rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
	mlx5_aso_completion_handle(sh);
	if (sq->next == n) {
		/* End of loop: wait 1 second. */
		us = US_PER_S;
		sq->next = 0;
	}
	mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
	if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
		DRV_LOG(ERR, "Cannot reinitialize ASO alarm.");
}

/**
 * API to start ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
{
	if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
		DRV_LOG(ERR, "Cannot start ASO age alarm.");
		return -rte_errno;
	}
	return 0;
}

/**
 * API to stop ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
{
	int retries = 1024;

	if (!sh->aso_age_mng->aso_sq.sq)
		return -EINVAL;
	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}
	return -rte_errno;
}