net/mlx5: initialize flow meter ASO SQ
dpdk.git: drivers/net/mlx5/mlx5_flow_aso.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <mlx5_prm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_flow.h"


/**
 * Destroy Completion Queue used for ASO access.
 *
 * @param[in] cq
 *   ASO CQ to destroy.
 */
static void
mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
{
        if (cq->cq_obj.cq)
                mlx5_devx_cq_destroy(&cq->cq_obj);
        memset(cq, 0, sizeof(*cq));
}

/**
 * Create Completion Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar_page_id
 *   UAR page ID to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
                   int socket, int uar_page_id)
{
        struct mlx5_devx_cq_attr attr = {
                .uar_page_id = uar_page_id,
        };

        cq->log_desc_n = log_desc_n;
        cq->cq_ci = 0;
        return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
}

/**
 * Free MR resources.
 *
 * @param[in] mr
 *   MR to free.
 */
static void
mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
{
        claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
        if (!mr->is_indirect && mr->umem)
                claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
        mlx5_free(mr->buf);
        memset(mr, 0, sizeof(*mr));
}

/**
 * Register Memory Region.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] length
 *   Size of MR buffer.
 * @param[in/out] mr
 *   Pointer to MR to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] pdn
 *   Protection Domain number to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
                     int socket, int pdn)
{
        struct mlx5_devx_mkey_attr mkey_attr;

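        /*
         * The buffer is allocated with 4 KB alignment and then registered
         * as a umem so the device can write the ASO query results into it.
         */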
        mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
                              socket);
        if (!mr->buf) {
                DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
                return -1;
        }
        mr->umem = mlx5_os_umem_reg(ctx, mr->buf, length,
                                    IBV_ACCESS_LOCAL_WRITE);
        if (!mr->umem) {
                DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
                goto error;
        }
        mkey_attr.addr = (uintptr_t)mr->buf;
        mkey_attr.size = length;
        mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
        mkey_attr.pd = pdn;
        mkey_attr.pg_access = 1;
        mkey_attr.klm_array = NULL;
        mkey_attr.klm_num = 0;
        mkey_attr.relaxed_ordering_read = 0;
        mkey_attr.relaxed_ordering_write = 0;
        mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
        if (!mr->mkey) {
                DRV_LOG(ERR, "Failed to create direct Mkey.");
                goto error;
        }
        mr->length = length;
        mr->is_indirect = false;
        return 0;
error:
        if (mr->umem)
                claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
        mlx5_free(mr->buf);
        return -1;
}

/**
 * Destroy Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to destroy.
 */
static void
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
        mlx5_devx_sq_destroy(&sq->sq_obj);
        mlx5_aso_cq_destroy(&sq->cq);
        memset(sq, 0, sizeof(*sq));
}

/**
 * Initialize Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
{
        volatile struct mlx5_aso_wqe *restrict wqe;
        int i;
        int size = 1 << sq->log_desc_n;
        uint64_t addr;

        /* All the fields set below stay constant over the lifetime of the SQ. */
        for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
                wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
                                                          (sizeof(*wqe) >> 4));
                wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
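                /*
                 * Each WQE reads back one pool's flow hit bits:
                 * MLX5_ASO_AGE_ACTIONS_PER_POOL (512) bits, i.e. a 64-byte
                 * window, matching the BYTEWISE_64BYTE data mask mode below.
                 */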
                addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
                                  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
                wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
                wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
                wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
                        (0u |
                         (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
                         (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
                wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
        }
}

/**
 * Initialize Send Queue used for ASO flow meter access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
{
        volatile struct mlx5_aso_wqe *restrict wqe;
        int i;
        int size = 1 << sq->log_desc_n;
        uint32_t idx;

        /* All the fields set below stay constant over the lifetime of the SQ. */
        for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
                wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
                                                          (sizeof(*wqe) >> 4));
                wqe->aso_cseg.operand_masks = RTE_BE32(0u |
                         (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
                         (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
                wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
                                                         MLX5_COMP_MODE_OFFSET);
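                /*
                 * Mark every meter slot in the WQE data segment valid and
                 * initialize its color to green.
                 */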
                for (idx = 0; idx < MLX5_ASO_METERS_PER_WQE;
                        idx++)
                        wqe->aso_dseg.mtrs[idx].v_bo_sc_bbog_mm =
                                RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
                                (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
        }
}

/**
 * Create Send Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq
 *   Pointer to SQ to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar
 *   User Access Region object.
 * @param[in] pdn
 *   Protection Domain number to use.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] ts_format
 *   Timestamp format to configure for the SQ.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
                   void *uar, uint32_t pdn, uint16_t log_desc_n,
                   uint32_t ts_format)
{
        struct mlx5_devx_create_sq_attr attr = {
                .user_index = 0xFFFF,
                .wq_attr = (struct mlx5_devx_wq_attr){
                        .pd = pdn,
                        .uar_page = mlx5_os_get_devx_uar_page_id(uar),
                },
                .ts_format = mlx5_ts_format_conv(ts_format),
        };
        struct mlx5_devx_modify_sq_attr modify_attr = {
                .state = MLX5_SQC_STATE_RDY,
        };
        uint16_t log_wqbb_n;
        int ret;

        if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
                               mlx5_os_get_devx_uar_page_id(uar)))
                goto error;
        sq->log_desc_n = log_desc_n;
        attr.cqn = sq->cq.cq_obj.cq->id;
        /* An mlx5_aso_wqe takes two WQBBs (twice the size of mlx5_wqe). */
        log_wqbb_n = log_desc_n + 1;
        ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
        if (ret) {
                DRV_LOG(ERR, "Can't create SQ object.");
                rte_errno = ENOMEM;
                goto error;
        }
        ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
        if (ret) {
                DRV_LOG(ERR, "Can't change SQ state to ready.");
                rte_errno = ENOMEM;
                goto error;
        }
        sq->pi = 0;
        sq->head = 0;
        sq->tail = 0;
        sq->sqn = sq->sq_obj.sq->id;
        sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
        return 0;
error:
        mlx5_aso_destroy_sq(sq);
        return -1;
}

/**
 * API to create and initialize Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] aso_opc_mod
 *   ASO opcode mode, flow hit or policer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                        enum mlx5_access_aso_opc_mod aso_opc_mod)
{
        uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;

        switch (aso_opc_mod) {
        case ASO_OPC_MOD_FLOW_HIT:
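                /*
                 * The MR holds one 64-byte flow hit window per SQ descriptor
                 * (MLX5_ASO_AGE_ACTIONS_PER_POOL bits, one bit per AGE action
                 * of the pool queried by that descriptor).
                 */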
                if (mlx5_aso_devx_reg_mr(sh->ctx,
                        (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
                        sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0, sh->pdn))
                        return -1;
                if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
                                  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
                                  sh->sq_ts_format)) {
                        mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
                        return -1;
                }
                mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
                break;
        case ASO_OPC_MOD_POLICER:
                if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->sq, 0,
                                  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
                                  sh->sq_ts_format))
                        return -1;
                mlx5_aso_mtr_init_sq(&sh->mtrmng->sq);
                break;
        default:
                DRV_LOG(ERR, "Unknown ASO operation mode");
                return -1;
        }
        return 0;
}

/**
 * API to destroy Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] aso_opc_mod
 *   ASO opcode mode, flow hit or policer.
 */
void
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
                                enum mlx5_access_aso_opc_mod aso_opc_mod)
{
        struct mlx5_aso_sq *sq;

        switch (aso_opc_mod) {
        case ASO_OPC_MOD_FLOW_HIT:
                mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
                sq = &sh->aso_age_mng->aso_sq;
                break;
        case ASO_OPC_MOD_POLICER:
                sq = &sh->mtrmng->sq;
                break;
        default:
                DRV_LOG(ERR, "Unknown ASO operation mode");
                return;
        }
        mlx5_aso_destroy_sq(sq);
}

/**
 * Write a burst of WQEs to ASO SQ.
 *
 * @param[in] mng
 *   ASO management data, contains the SQ.
 * @param[in] n
 *   Index of the last valid pool.
 *
 * @return
 *   Number of WQEs in burst.
 */
static uint16_t
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
{
        volatile struct mlx5_aso_wqe *wqe;
        struct mlx5_aso_sq *sq = &mng->aso_sq;
        struct mlx5_aso_age_pool *pool;
        uint16_t size = 1 << sq->log_desc_n;
        uint16_t mask = size - 1;
        uint16_t max;
        uint16_t start_head = sq->head;

        max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
        if (unlikely(!max))
                return 0;
        sq->elts[start_head & mask].burst_size = max;
        do {
                wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
                rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
                /* Fill next WQE. */
                rte_spinlock_lock(&mng->resize_sl);
                pool = mng->pools[sq->next];
                rte_spinlock_unlock(&mng->resize_sl);
                sq->elts[sq->head & mask].pool = pool;
                wqe->general_cseg.misc =
                                rte_cpu_to_be_32(((struct mlx5_devx_obj *)
                                                 (pool->flow_hit_aso_obj))->id);
                wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
                                                         MLX5_COMP_MODE_OFFSET);
                wqe->general_cseg.opcode = rte_cpu_to_be_32
                                                (MLX5_OPCODE_ACCESS_ASO |
                                                 (ASO_OPC_MOD_FLOW_HIT <<
                                                  WQE_CSEG_OPC_MOD_OFFSET) |
                                                 (sq->pi <<
                                                  WQE_CSEG_WQE_INDEX_OFFSET));
                sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
                sq->head++;
                sq->next++;
                max--;
        } while (max);
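        /* Request a CQE only for the last WQE of the burst. */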
        wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
                                                         MLX5_COMP_MODE_OFFSET);
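        /*
         * Ring the doorbell: commit the WQEs to memory, update the doorbell
         * record with the new producer index and write the first 8 bytes of
         * the last WQE to the UAR register.
         */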
        rte_io_wmb();
        sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
        rte_wmb();
        *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
        rte_wmb();
        return sq->elts[start_head & mask].burst_size;
}

/**
 * Debug utility function. Dump contents of error CQE and WQE.
 *
 * @param[in] cqe
 *   Error CQE to dump.
 * @param[in] wqe
 *   Error WQE to dump.
 */
static void
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
{
        int i;

        DRV_LOG(ERR, "Error cqe:");
        for (i = 0; i < 16; i += 4)
                DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
                        cqe[i + 2], cqe[i + 3]);
        DRV_LOG(ERR, "\nError wqe:");
        for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
                DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
                        wqe[i + 2], wqe[i + 3]);
}

/**
 * Handle case of error CQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */
static void
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
{
        struct mlx5_aso_cq *cq = &sq->cq;
        uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
        volatile struct mlx5_err_cqe *cqe =
                        (volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];

        cq->errors++;
        idx = rte_be_to_cpu_16(cqe->wqe_counter) & ((1u << sq->log_desc_n) - 1);
        mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
                               (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);
}

/**
 * Update ASO objects upon completion.
 *
 * @param[in] sh
 *   Shared device context.
 * @param[in] n
 *   Number of completed ASO objects.
 */
static void
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
        struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
        struct mlx5_aso_sq *sq = &mng->aso_sq;
        struct mlx5_age_info *age_info;
        const uint16_t size = 1 << sq->log_desc_n;
        const uint16_t mask = size - 1;
        const uint64_t curr = MLX5_CURR_TIME_SEC;
        uint16_t expected = AGE_CANDIDATE;
        uint16_t i;

        for (i = 0; i < n; ++i) {
                uint16_t idx = (sq->tail + i) & mask;
                struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
                uint64_t diff = curr - pool->time_of_last_age_check;
                uint64_t *addr = sq->mr.buf;
                int j;

                addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
                pool->time_of_last_age_check = curr;
                for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
                        struct mlx5_aso_age_action *act = &pool->actions[j];
                        struct mlx5_age_param *ap = &act->age_params;
                        uint8_t byte;
                        uint8_t offset;
                        uint8_t *u8addr;
                        uint8_t hit;

                        if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
                                            AGE_CANDIDATE)
                                continue;
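                        /*
                         * The hit bit for action j lives in byte 63 - j / 8
                         * of the 64-byte window, at bit offset j % 8.
                         */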
                        byte = 63 - (j / 8);
                        offset = j % 8;
                        u8addr = (uint8_t *)addr;
                        hit = (u8addr[byte] >> offset) & 0x1;
                        if (hit) {
                                __atomic_store_n(&ap->sec_since_last_hit, 0,
                                                 __ATOMIC_RELAXED);
                        } else {
                                struct mlx5_priv *priv;

                                __atomic_fetch_add(&ap->sec_since_last_hit,
                                                   diff, __ATOMIC_RELAXED);
                                /* If timeout passed add to aged-out list. */
                                if (ap->sec_since_last_hit <= ap->timeout)
                                        continue;
                                priv =
                                rte_eth_devices[ap->port_id].data->dev_private;
                                age_info = GET_PORT_AGE_INFO(priv);
                                rte_spinlock_lock(&age_info->aged_sl);
                                if (__atomic_compare_exchange_n(&ap->state,
                                                                &expected,
                                                                AGE_TMOUT,
                                                                false,
                                                               __ATOMIC_RELAXED,
                                                            __ATOMIC_RELAXED)) {
                                        LIST_INSERT_HEAD(&age_info->aged_aso,
                                                         act, next);
                                        MLX5_AGE_SET(age_info,
                                                     MLX5_AGE_EVENT_NEW);
                                }
                                rte_spinlock_unlock(&age_info->aged_sl);
                        }
                }
        }
        mlx5_age_event_prepare(sh);
}

/**
 * Handle completions from WQEs sent to ASO SQ.
 *
 * @param[in] sh
 *   Shared device context.
 *
 * @return
 *   Number of CQEs handled.
 */
static uint16_t
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
{
        struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
        struct mlx5_aso_sq *sq = &mng->aso_sq;
        struct mlx5_aso_cq *cq = &sq->cq;
        volatile struct mlx5_cqe *restrict cqe;
        const unsigned int cq_size = 1 << cq->log_desc_n;
        const unsigned int mask = cq_size - 1;
        uint32_t idx;
        uint32_t next_idx = cq->cq_ci & mask;
        const uint16_t max = (uint16_t)(sq->head - sq->tail);
        uint16_t i = 0;
        int ret;

        if (unlikely(!max))
                return 0;
        do {
                idx = next_idx;
                next_idx = (cq->cq_ci + 1) & mask;
                rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
                cqe = &cq->cq_obj.cqes[idx];
                ret = check_cqe(cqe, cq_size, cq->cq_ci);
                /*
                 * Be sure owner read is done before any other cookie field or
                 * opaque field.
                 */
                rte_io_rmb();
                if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
                        if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
                                break;
                        mlx5_aso_cqe_err_handle(sq);
                } else {
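                        /* One CQE completes a whole burst of WQEs. */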
                        i += sq->elts[(sq->tail + i) & mask].burst_size;
                }
                cq->cq_ci++;
        } while (1);
        if (likely(i)) {
                mlx5_aso_age_action_update(sh, i);
                sq->tail += i;
                rte_io_wmb();
                cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
        }
        return i;
}

/**
 * Periodically read CQEs and send WQEs to ASO SQ.
 *
 * @param[in] arg
 *   Shared device context containing the ASO SQ.
 */
static void
mlx5_flow_aso_alarm(void *arg)
{
        struct mlx5_dev_ctx_shared *sh = arg;
        struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
        uint32_t us = 100u;
        uint16_t n;

        rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
        n = sh->aso_age_mng->next;
        rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
        mlx5_aso_completion_handle(sh);
        if (sq->next == n) {
                /* End of loop: wait 1 second. */
                us = US_PER_S;
                sq->next = 0;
        }
        mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
        if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
                DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
}

/**
 * API to start ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh)
{
        if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
                DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
                return -rte_errno;
        }
        return 0;
}

/**
 * API to stop ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
{
        int retries = 1024;

        if (!sh->aso_age_mng->aso_sq.sq_obj.sq)
                return -EINVAL;
        rte_errno = 0;
        while (--retries) {
                rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
                if (rte_errno != EINPROGRESS)
                        break;
                rte_pause();
        }
        return -rte_errno;
}