/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <mlx5_prm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_flow.h"

/**
 * Destroy Completion Queue used for ASO access.
 *
 * @param[in] cq
 *   ASO CQ to destroy.
 */
static void
mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
{
        if (cq->cq_obj.cq)
                mlx5_devx_cq_destroy(&cq->cq_obj);
        memset(cq, 0, sizeof(*cq));
}

/**
 * Create Completion Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar_page_id
 *   UAR page ID to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
                   int socket, int uar_page_id)
{
        struct mlx5_devx_cq_attr attr = {
                .uar_page_id = uar_page_id,
        };

        cq->log_desc_n = log_desc_n;
        cq->cq_ci = 0;
        return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
}

/**
 * Free MR resources.
 *
 * @param[in] mr
 *   MR to free.
 */
static void
mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
{
        claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
        if (!mr->is_indirect && mr->umem)
                claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
        mlx5_free(mr->buf);
        memset(mr, 0, sizeof(*mr));
}

/**
 * Register Memory Region.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] length
 *   Size of MR buffer.
 * @param[in/out] mr
 *   Pointer to MR to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] pdn
 *   Protection Domain number to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
                     int socket, int pdn)
{
        struct mlx5_devx_mkey_attr mkey_attr;

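        /* Allocate a zeroed, page-aligned buffer to back the ASO data. */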
        mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
                              socket);
        if (!mr->buf) {
                DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
                return -1;
        }
        mr->umem = mlx5_os_umem_reg(ctx, mr->buf, length,
                                    IBV_ACCESS_LOCAL_WRITE);
        if (!mr->umem) {
                DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
                goto error;
        }
        mkey_attr.addr = (uintptr_t)mr->buf;
        mkey_attr.size = length;
        mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
        mkey_attr.pd = pdn;
        mkey_attr.pg_access = 1;
        mkey_attr.klm_array = NULL;
        mkey_attr.klm_num = 0;
        mkey_attr.relaxed_ordering_read = 0;
        mkey_attr.relaxed_ordering_write = 0;
        mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
        if (!mr->mkey) {
                DRV_LOG(ERR, "Failed to create direct Mkey.");
                goto error;
        }
        mr->length = length;
        mr->is_indirect = false;
        return 0;
error:
        if (mr->umem)
                claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
        mlx5_free(mr->buf);
        return -1;
}

/**
 * Destroy Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to destroy.
 */
static void
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
        mlx5_devx_sq_destroy(&sq->sq_obj);
        mlx5_aso_cq_destroy(&sq->cq);
        memset(sq, 0, sizeof(*sq));
}

/**
 * Initialize Send Queue used for ASO flow hit access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
{
        volatile struct mlx5_aso_wqe *restrict wqe;
        int i;
        int size = 1 << sq->log_desc_n;
        uint64_t addr;

        /* All the following fields remain constant across the WQEs. */
        for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
                wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
                                                          (sizeof(*wqe) >> 4));
                wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
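                /* Each WQE covers its own 64-byte block of the MR: one hit
                 * bit per age action in the corresponding pool.
                 */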
                addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
                                            MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
                wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
                wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
                wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
                        (0u |
                         (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
                         (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
                wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
        }
}

/**
 * Initialize Send Queue used for ASO flow meter access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
{
        volatile struct mlx5_aso_wqe *restrict wqe;
        int i;
        int size = 1 << sq->log_desc_n;

        /* All the following fields remain constant across the WQEs. */
        for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
                wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
                                                          (sizeof(*wqe) >> 4));
                wqe->aso_cseg.operand_masks = RTE_BE32(0u |
                         (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
                         (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
                wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
                                                   MLX5_COMP_MODE_OFFSET);
        }
}

/**
 * Create Send Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq
 *   Pointer to SQ to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar
 *   User Access Region object.
 * @param[in] pdn
 *   Protection Domain number to use.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] ts_format
 *   Timestamp format to be used by the Send Queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
                   void *uar, uint32_t pdn, uint16_t log_desc_n,
                   uint32_t ts_format)
{
        struct mlx5_devx_create_sq_attr attr = {
                .user_index = 0xFFFF,
                .wq_attr = (struct mlx5_devx_wq_attr){
                        .pd = pdn,
                        .uar_page = mlx5_os_get_devx_uar_page_id(uar),
                },
                .ts_format = mlx5_ts_format_conv(ts_format),
        };
        struct mlx5_devx_modify_sq_attr modify_attr = {
                .state = MLX5_SQC_STATE_RDY,
        };
        uint16_t log_wqbb_n;
        int ret;

        if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
                               mlx5_os_get_devx_uar_page_id(uar)))
                goto error;
        sq->log_desc_n = log_desc_n;
        attr.cqn = sq->cq.cq_obj.cq->id;
        /* The mlx5_aso_wqe is twice the size of the mlx5_wqe, hence the +1. */
        log_wqbb_n = log_desc_n + 1;
        ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
        if (ret) {
                DRV_LOG(ERR, "Can't create SQ object.");
                rte_errno = ENOMEM;
                goto error;
        }
        ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
        if (ret) {
                DRV_LOG(ERR, "Can't change SQ state to ready.");
                rte_errno = ENOMEM;
                goto error;
        }
        sq->pi = 0;
        sq->head = 0;
        sq->tail = 0;
        sq->sqn = sq->sq_obj.sq->id;
        sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
        rte_spinlock_init(&sq->sqsl);
        return 0;
error:
        mlx5_aso_destroy_sq(sq);
        return -1;
}

/**
 * API to create and initialize Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] aso_opc_mod
 *   Mode of ASO feature.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                    enum mlx5_access_aso_opc_mod aso_opc_mod)
{
        uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;

        switch (aso_opc_mod) {
        case ASO_OPC_MOD_FLOW_HIT:
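                /* The MR holds one hit bitmap (MLX5_ASO_AGE_ACTIONS_PER_POOL
                 * bits) per SQ descriptor.
                 */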
                if (mlx5_aso_devx_reg_mr(sh->ctx,
                        (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
                        sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0, sh->pdn))
                        return -1;
                if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
                                  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
                                  sh->sq_ts_format)) {
                        mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
                        return -1;
                }
                mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
                break;
        case ASO_OPC_MOD_POLICER:
                if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
                                  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
                                  sh->sq_ts_format))
                        return -1;
                mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
                break;
        default:
                DRV_LOG(ERR, "Unknown ASO operation mode");
                return -1;
        }
        return 0;
}

/**
 * API to destroy Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] aso_opc_mod
 *   Mode of ASO feature.
 */
void
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
                      enum mlx5_access_aso_opc_mod aso_opc_mod)
{
        struct mlx5_aso_sq *sq;

        switch (aso_opc_mod) {
        case ASO_OPC_MOD_FLOW_HIT:
                mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
                sq = &sh->aso_age_mng->aso_sq;
                break;
        case ASO_OPC_MOD_POLICER:
                sq = &sh->mtrmng->pools_mng.sq;
                break;
        default:
                DRV_LOG(ERR, "Unknown ASO operation mode");
                return;
        }
        mlx5_aso_destroy_sq(sq);
}

/**
 * Write a burst of WQEs to ASO SQ.
 *
 * @param[in] mng
 *   ASO management data, contains the SQ.
 * @param[in] n
 *   Index of the last valid pool.
 *
 * @return
 *   Number of WQEs in burst.
 */
static uint16_t
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
{
        volatile struct mlx5_aso_wqe *wqe;
        struct mlx5_aso_sq *sq = &mng->aso_sq;
        struct mlx5_aso_age_pool *pool;
        uint16_t size = 1 << sq->log_desc_n;
        uint16_t mask = size - 1;
        uint16_t max;
        uint16_t start_head = sq->head;

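        /* Limit the burst by free SQ slots and the pools left to poll. */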
        max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
        if (unlikely(!max))
                return 0;
        sq->elts[start_head & mask].burst_size = max;
        do {
                wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
                rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
                /* Fill next WQE. */
                rte_spinlock_lock(&mng->resize_sl);
                pool = mng->pools[sq->next];
                rte_spinlock_unlock(&mng->resize_sl);
                sq->elts[sq->head & mask].pool = pool;
                wqe->general_cseg.misc =
                                rte_cpu_to_be_32(((struct mlx5_devx_obj *)
                                                 (pool->flow_hit_aso_obj))->id);
                wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
                                                         MLX5_COMP_MODE_OFFSET);
                wqe->general_cseg.opcode = rte_cpu_to_be_32
                                                (MLX5_OPCODE_ACCESS_ASO |
                                                 (ASO_OPC_MOD_FLOW_HIT <<
                                                  WQE_CSEG_OPC_MOD_OFFSET) |
                                                 (sq->pi <<
                                                  WQE_CSEG_WQE_INDEX_OFFSET));
                sq->pi += 2; /* Each WQE contains 2 WQEBBs. */
                sq->head++;
                sq->next++;
                max--;
        } while (max);
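        /* Request an unconditional completion only for the last WQE. */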
        wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
                                           MLX5_COMP_MODE_OFFSET);
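        /* Make the WQEs visible before the doorbell record update, and the
         * doorbell record visible before ringing the UAR doorbell.
         */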
        rte_io_wmb();
        sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
        rte_wmb();
        *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
        rte_wmb();
        return sq->elts[start_head & mask].burst_size;
}

/**
 * Debug utility function. Dump contents of error CQE and WQE.
 *
 * @param[in] cqe
 *   Error CQE to dump.
 * @param[in] wqe
 *   Error WQE to dump.
 */
static void
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
{
        int i;

        DRV_LOG(ERR, "Error cqe:");
        for (i = 0; i < 16; i += 4)
                DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
                        cqe[i + 2], cqe[i + 3]);
        DRV_LOG(ERR, "\nError wqe:");
        for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
                DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
                        wqe[i + 2], wqe[i + 3]);
}

/**
 * Handle case of error CQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */
static void
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
{
        struct mlx5_aso_cq *cq = &sq->cq;
        uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
        volatile struct mlx5_err_cqe *cqe =
                        (volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];

        cq->errors++;
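        /* Locate the failing WQE from the counter in the error CQE. */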
        idx = rte_be_to_cpu_16(cqe->wqe_counter) &
              ((1u << sq->log_desc_n) - 1);
        mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
                               (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);
}

/**
 * Update ASO objects upon completion.
 *
 * @param[in] sh
 *   Shared device context.
 * @param[in] n
 *   Number of completed ASO objects.
 */
static void
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
        struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
        struct mlx5_aso_sq *sq = &mng->aso_sq;
        struct mlx5_age_info *age_info;
        const uint16_t size = 1 << sq->log_desc_n;
        const uint16_t mask = size - 1;
        const uint64_t curr = MLX5_CURR_TIME_SEC;
        uint16_t expected = AGE_CANDIDATE;
        uint16_t i;

        for (i = 0; i < n; ++i) {
                uint16_t idx = (sq->tail + i) & mask;
                struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
                uint64_t diff = curr - pool->time_of_last_age_check;
                uint64_t *addr = sq->mr.buf;
                int j;

                addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
                pool->time_of_last_age_check = curr;
                for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
                        struct mlx5_aso_age_action *act = &pool->actions[j];
                        struct mlx5_age_param *ap = &act->age_params;
                        uint8_t byte;
                        uint8_t offset;
                        uint8_t *u8addr;
                        uint8_t hit;

                        if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
                                            AGE_CANDIDATE)
                                continue;
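                        /* Hit bits fill the 64-byte block from the end:
                         * action j is bit (j % 8) of byte (63 - j / 8).
                         */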
                        byte = 63 - (j / 8);
                        offset = j % 8;
                        u8addr = (uint8_t *)addr;
                        hit = (u8addr[byte] >> offset) & 0x1;
                        if (hit) {
                                __atomic_store_n(&ap->sec_since_last_hit, 0,
                                                 __ATOMIC_RELAXED);
                        } else {
                                struct mlx5_priv *priv;

                                __atomic_fetch_add(&ap->sec_since_last_hit,
                                                   diff, __ATOMIC_RELAXED);
                                /* If timeout passed add to aged-out list. */
                                if (ap->sec_since_last_hit <= ap->timeout)
                                        continue;
                                priv =
                                rte_eth_devices[ap->port_id].data->dev_private;
                                age_info = GET_PORT_AGE_INFO(priv);
                                rte_spinlock_lock(&age_info->aged_sl);
                                if (__atomic_compare_exchange_n(&ap->state,
                                                                &expected,
                                                                AGE_TMOUT,
                                                                false,
                                                               __ATOMIC_RELAXED,
                                                            __ATOMIC_RELAXED)) {
                                        LIST_INSERT_HEAD(&age_info->aged_aso,
                                                         act, next);
                                        MLX5_AGE_SET(age_info,
                                                     MLX5_AGE_EVENT_NEW);
                                }
                                rte_spinlock_unlock(&age_info->aged_sl);
                        }
                }
        }
        mlx5_age_event_prepare(sh);
}

/**
 * Handle completions from WQEs sent to ASO SQ.
 *
 * @param[in] sh
 *   Shared device context.
 *
 * @return
 *   Number of CQEs handled.
 */
static uint16_t
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
{
        struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
        struct mlx5_aso_sq *sq = &mng->aso_sq;
        struct mlx5_aso_cq *cq = &sq->cq;
        volatile struct mlx5_cqe *restrict cqe;
        const unsigned int cq_size = 1 << cq->log_desc_n;
        const unsigned int mask = cq_size - 1;
        uint32_t idx;
        uint32_t next_idx = cq->cq_ci & mask;
        const uint16_t max = (uint16_t)(sq->head - sq->tail);
        uint16_t i = 0;
        int ret;

        if (unlikely(!max))
                return 0;
        do {
                idx = next_idx;
                next_idx = (cq->cq_ci + 1) & mask;
                rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
                cqe = &cq->cq_obj.cqes[idx];
                ret = check_cqe(cqe, cq_size, cq->cq_ci);
                /*
                 * Be sure owner read is done before any other cookie field or
                 * opaque field.
                 */
                rte_io_rmb();
                if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
                        if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
                                break;
                        mlx5_aso_cqe_err_handle(sq);
                } else {
                        i += sq->elts[(sq->tail + i) & mask].burst_size;
                }
                cq->cq_ci++;
        } while (1);
        if (likely(i)) {
                mlx5_aso_age_action_update(sh, i);
                sq->tail += i;
                rte_io_wmb();
                cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
        }
        return i;
}

/**
 * Periodically read CQEs and send WQEs to ASO SQ.
 *
 * @param[in] arg
 *   Shared device context containing the ASO SQ.
 */
static void
mlx5_flow_aso_alarm(void *arg)
{
        struct mlx5_dev_ctx_shared *sh = arg;
        struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
        uint32_t us = 100u;
        uint16_t n;

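        /* Snapshot the valid pool index bound under the resize lock. */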
        rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
        n = sh->aso_age_mng->next;
        rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
        mlx5_aso_completion_handle(sh);
        if (sq->next == n) {
                /* End of loop: wait 1 second. */
                us = US_PER_S;
                sq->next = 0;
        }
        mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
        if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
                DRV_LOG(ERR, "Cannot reinitialize ASO alarm.");
}

/**
 * API to start ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh)
{
        if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
                DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
                return -rte_errno;
        }
        return 0;
}

/**
 * API to stop ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
{
        int retries = 1024;

        if (!sh->aso_age_mng->aso_sq.sq_obj.sq)
                return -EINVAL;
        rte_errno = 0;
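        /* rte_eal_alarm_cancel() sets rte_errno to EINPROGRESS while the
         * alarm callback is executing; retry until it can be cancelled.
         */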
        while (--retries) {
                rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
                if (rte_errno != EINPROGRESS)
                        break;
                rte_pause();
        }
        return -rte_errno;
}

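/**
 * Post a single ASO flow meter configuration WQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 * @param[in] aso_mtr
 *   ASO meter to configure.
 *
 * @return
 *   Number of WQEs posted: 1 on success, 0 if the SQ is full.
 */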
static uint16_t
mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
                               struct mlx5_aso_mtr *aso_mtr)
{
        volatile struct mlx5_aso_wqe *wqe = NULL;
        struct mlx5_flow_meter_info *fm = NULL;
        struct mlx5_flow_meter_profile *fmp;
        uint16_t size = 1 << sq->log_desc_n;
        uint16_t mask = size - 1;
        uint16_t res;
        uint32_t dseg_idx = 0;
        struct mlx5_aso_mtr_pool *pool = NULL;

        rte_spinlock_lock(&sq->sqsl);
        res = size - (uint16_t)(sq->head - sq->tail);
        if (unlikely(!res)) {
                DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
                rte_spinlock_unlock(&sq->sqsl);
                return 0;
        }
        wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
        rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
        /* Fill next WQE. */
        fm = &aso_mtr->fm;
        sq->elts[sq->head & mask].mtr = aso_mtr;
        pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool,
                            mtrs[aso_mtr->offset]);
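        /* Each ASO object covers two meters, hence the offset is halved. */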
        wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
                        (aso_mtr->offset >> 1));
        wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
                        (ASO_OPC_MOD_POLICER <<
                        WQE_CSEG_OPC_MOD_OFFSET) |
                        sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
        /* There are 2 meters in one ASO cache line. */
        dseg_idx = aso_mtr->offset & 0x1;
        wqe->aso_cseg.data_mask =
                RTE_BE64(MLX5_IFC_FLOW_METER_PARAM_MASK << (32 * !dseg_idx));
        if (fm->is_enable) {
                wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
                        fm->profile->srtcm_prm.cbs_cir;
                wqe->aso_dseg.mtrs[dseg_idx].ebs_eir =
                        fm->profile->srtcm_prm.ebs_eir;
        } else {
                wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
                        RTE_BE32(MLX5_IFC_FLOW_METER_DISABLE_CBS_CIR_VAL);
                wqe->aso_dseg.mtrs[dseg_idx].ebs_eir = 0;
        }
        fmp = fm->profile;
        if (fmp->profile.packet_mode)
                wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
                                RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
                                (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET) |
                                (MLX5_METER_MODE_PKT << ASO_DSEG_MTR_MODE));
        else
                wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
                                RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
                                (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
        sq->head++;
        sq->pi += 2; /* Each WQE contains 2 WQEBBs. */
        rte_io_wmb();
        sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
        rte_wmb();
        *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
        rte_wmb();
        rte_spinlock_unlock(&sq->sqsl);
        return 1;
}

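/**
 * Update the state of completed ASO meters from WAIT to READY.
 *
 * @param[in] sq
 *   ASO SQ to use.
 * @param[in] aso_mtrs_nums
 *   Number of completed ASO meters.
 */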
static void
mlx5_aso_mtrs_status_update(struct mlx5_aso_sq *sq, uint16_t aso_mtrs_nums)
{
        uint16_t size = 1 << sq->log_desc_n;
        uint16_t mask = size - 1;
        uint16_t i;
        struct mlx5_aso_mtr *aso_mtr = NULL;
        uint8_t exp_state = ASO_METER_WAIT;

        for (i = 0; i < aso_mtrs_nums; ++i) {
                aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
                MLX5_ASSERT(aso_mtr);
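                /* Move the meter to READY only if it is still in WAIT. */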
                (void)__atomic_compare_exchange_n(&aso_mtr->state,
                                &exp_state, ASO_METER_READY,
                                false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
        }
}

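/**
 * Handle completions of the ASO flow meter SQ and update meter states.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */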
static void
mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
{
        struct mlx5_aso_cq *cq = &sq->cq;
        volatile struct mlx5_cqe *restrict cqe;
        const unsigned int cq_size = 1 << cq->log_desc_n;
        const unsigned int mask = cq_size - 1;
        uint32_t idx;
        uint32_t next_idx = cq->cq_ci & mask;
        uint16_t max;
        uint16_t n = 0;
        int ret;

        rte_spinlock_lock(&sq->sqsl);
        max = (uint16_t)(sq->head - sq->tail);
        if (unlikely(!max)) {
                rte_spinlock_unlock(&sq->sqsl);
                return;
        }
        do {
                idx = next_idx;
                next_idx = (cq->cq_ci + 1) & mask;
                rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
                cqe = &cq->cq_obj.cqes[idx];
                ret = check_cqe(cqe, cq_size, cq->cq_ci);
                /*
                 * Be sure owner read is done before any other cookie field or
                 * opaque field.
                 */
                rte_io_rmb();
                if (ret != MLX5_CQE_STATUS_SW_OWN) {
                        if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
                                break;
                        mlx5_aso_cqe_err_handle(sq);
                } else {
                        n++;
                }
                cq->cq_ci++;
        } while (1);
        if (likely(n)) {
                mlx5_aso_mtrs_status_update(sq, n);
                sq->tail += n;
                rte_io_wmb();
                cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
        }
        rte_spinlock_unlock(&sq->sqsl);
}

/**
 * Update flow meter parameters by sending an ASO WQE.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] mtr
 *   Pointer to ASO meter to be modified.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_aso_mtr *mtr)
{
        struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
        uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;

        do {
                mlx5_aso_mtr_completion_handle(sq);
                if (mlx5_aso_mtr_sq_enqueue_single(sq, mtr))
                        return 0;
                /* Waiting for WQE resource. */
                rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
        } while (--poll_wqe_times);
        DRV_LOG(ERR, "Failed to send WQE for ASO meter offset %d",
                mtr->offset);
        return -1;
}

/**
 * Wait for an ASO meter to become ready.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] mtr
 *   Pointer to ASO meter to wait for.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
                  struct mlx5_aso_mtr *mtr)
{
        struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
        uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;

        if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
                                            ASO_METER_READY)
                return 0;
        do {
                mlx5_aso_mtr_completion_handle(sq);
                if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
                                            ASO_METER_READY)
                        return 0;
                /* Waiting for CQE ready. */
                rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
        } while (--poll_cqe_times);
        DRV_LOG(ERR, "Failed to poll CQE ready for ASO meter offset %d",
                mtr->offset);
        return -1;
}