dpdk.git: drivers/net/mlx5/mlx5_flow_hw.c (commit f0cb530524db7e189efbd8c9d2f182a0c204a85a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4
5 #include <rte_flow.h>
6
7 #include <mlx5_malloc.h>
8 #include "mlx5_defs.h"
9 #include "mlx5_flow.h"
10
11 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
12
13 /* The maximum number of actions supported in a flow. */
14 #define MLX5_HW_MAX_ACTS 16
15
16 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
17
18 /* DR action flags for root and HWS tables, per table type. */
19 static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
20                                 [MLX5DR_TABLE_TYPE_MAX] = {
21         {
22                 MLX5DR_ACTION_FLAG_ROOT_RX,
23                 MLX5DR_ACTION_FLAG_ROOT_TX,
24                 MLX5DR_ACTION_FLAG_ROOT_FDB,
25         },
26         {
27                 MLX5DR_ACTION_FLAG_HWS_RX,
28                 MLX5DR_ACTION_FLAG_HWS_TX,
29                 MLX5DR_ACTION_FLAG_HWS_FDB,
30         },
31 };
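/*
 * Illustrative lookup, matching the usage later in this file: the first index
 * selects root (0) vs. HWS (1) tables and the second index the table type,
 * e.g. mlx5_hw_act_flag[!!attr->group][MLX5DR_TABLE_TYPE_NIC_RX].
 */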
32
33 /**
34  * Destroy DR actions created by an action template.
35  *
36  * The DR actions are created during the table creation's action translation
37  * and need to be destroyed when the table is destroyed.
38  *
39  * @param[in] acts
40  *   Pointer to the template HW steering DR actions.
41  */
42 static void
43 __flow_hw_action_template_destroy(struct mlx5_hw_actions *acts __rte_unused)
44 {
45 }
46
47 /**
48  * Translate rte_flow actions to DR actions.
49  *
50  * The action template already indicates which actions are used, so translate
51  * the rte_flow actions to DR actions where possible. This saves the cycles
52  * otherwise spent organizing the actions at flow creation time. Actions with
53  * only partial information at this stage need to be added to a list and
54  * constructed per flow.
55  *
56  * @param[in] dev
57  *   Pointer to the rte_eth_dev structure.
58  * @param[in] table_attr
59  *   Pointer to the table attributes.
62  * @param[in, out] acts
63  *   Pointer to the template HW steering DR actions.
64  * @param[in] at
65  *   Action template.
66  * @param[out] error
67  *   Pointer to error structure.
68  *
69  * @return
70  *    0 on success, a negative value otherwise and rte_errno is set.
71  */
72 static int
73 flow_hw_actions_translate(struct rte_eth_dev *dev,
74                           const struct rte_flow_template_table_attr *table_attr,
75                           struct mlx5_hw_actions *acts,
76                           struct rte_flow_actions_template *at,
77                           struct rte_flow_error *error __rte_unused)
78 {
79         struct mlx5_priv *priv = dev->data->dev_private;
80         const struct rte_flow_attr *attr = &table_attr->flow_attr;
81         struct rte_flow_action *actions = at->actions;
82         struct rte_flow_action *masks = at->masks;
83         bool actions_end = false;
84         uint32_t type;
85
86         if (attr->transfer)
87                 type = MLX5DR_TABLE_TYPE_FDB;
88         else if (attr->egress)
89                 type = MLX5DR_TABLE_TYPE_NIC_TX;
90         else
91                 type = MLX5DR_TABLE_TYPE_NIC_RX;
92         for (; !actions_end; actions++, masks++) {
93                 switch (actions->type) {
94                 case RTE_FLOW_ACTION_TYPE_INDIRECT:
95                         break;
96                 case RTE_FLOW_ACTION_TYPE_VOID:
97                         break;
98                 case RTE_FLOW_ACTION_TYPE_DROP:
99                         acts->drop = priv->hw_drop[!!attr->group][type];
100                         break;
101                 case RTE_FLOW_ACTION_TYPE_END:
102                         actions_end = true;
103                         break;
104                 default:
105                         break;
106                 }
107         }
108         return 0;
109 }
110
111 /**
112  * Construct flow action array.
113  *
114  * For action templates that contain dynamic actions, those actions need to
115  * be updated according to the rte_flow actions given at flow creation time.
116  *
117  * @param[in] hw_acts
118  *   Pointer to translated actions from template.
119  * @param[in] actions
120  *   Array of rte_flow actions to be checked.
121  * @param[in, out] rule_acts
122  *   Array of DR rule actions to be used during flow creation.
123  * @param[out] acts_num
124  *   Pointer to the number of actions actually constructed.
125  *
126  * @return
127  *    0 on success, negative value otherwise and rte_errno is set.
128  */
129 static __rte_always_inline int
130 flow_hw_actions_construct(struct mlx5_hw_actions *hw_acts,
131                           const struct rte_flow_action actions[],
132                           struct mlx5dr_rule_action *rule_acts,
133                           uint32_t *acts_num)
134 {
135         bool actions_end = false;
136         uint32_t i;
137
138         for (i = 0; !actions_end && i < MLX5_HW_MAX_ACTS; actions++) {
139                 switch (actions->type) {
140                 case RTE_FLOW_ACTION_TYPE_INDIRECT:
141                         break;
142                 case RTE_FLOW_ACTION_TYPE_VOID:
143                         break;
144                 case RTE_FLOW_ACTION_TYPE_DROP:
145                         rule_acts[i++].action = hw_acts->drop;
146                         break;
147                 case RTE_FLOW_ACTION_TYPE_END:
148                         actions_end = true;
149                         break;
150                 default:
151                         break;
152                 }
153         }
154         *acts_num = i;
155         return 0;
156 }
157
158 /**
159  * Enqueue HW steering flow creation.
160  *
161  * The flow will be applied to the HW only if the postpone bit is not set or
162  * the extra push function is called.
163  * The flow creation status should be checked from dequeue result.
164  *
165  * @param[in] dev
166  *   Pointer to the rte_eth_dev structure.
167  * @param[in] queue
168  *   The queue to create the flow.
169  * @param[in] attr
170  *   Pointer to the flow operation attributes.
171  * @param[in] items
172  *   Items with flow spec value.
173  * @param[in] pattern_template_index
174  *   Index of the pattern template in the table that this flow follows.
175  * @param[in] actions
176  *   Action with flow spec value.
177  * @param[in] action_template_index
178  *   Index of the actions template in the table that this flow follows.
179  * @param[in] user_data
180  *   Pointer to the user_data.
181  * @param[out] error
182  *   Pointer to error structure.
183  *
184  * @return
185  *    Flow pointer on success, NULL otherwise and rte_errno is set.
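 *
 * A minimal application-side sketch of this asynchronous path (illustrative
 * only; the port_id, queue 0, table, pattern/actions arrays and the error
 * handling are assumptions, not something this driver defines):
 *
 * @code{.c}
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow_op_result res[32];
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *	int n;
 *
 *	flow = rte_flow_async_create(port_id, 0, &op_attr, table,
 *				     pattern, 0, actions, 0, NULL, &err);
 *	if (flow == NULL)
 *		return -rte_errno;
 *	rte_flow_push(port_id, 0, &err);
 *	n = rte_flow_pull(port_id, 0, res, RTE_DIM(res), &err);
 *	// n results are now valid, each res[i].status reports the outcome
 * @endcode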
186  */
187 static struct rte_flow *
188 flow_hw_async_flow_create(struct rte_eth_dev *dev,
189                           uint32_t queue,
190                           const struct rte_flow_op_attr *attr,
191                           struct rte_flow_template_table *table,
192                           const struct rte_flow_item items[],
193                           uint8_t pattern_template_index,
194                           const struct rte_flow_action actions[],
195                           uint8_t action_template_index,
196                           void *user_data,
197                           struct rte_flow_error *error)
198 {
199         struct mlx5_priv *priv = dev->data->dev_private;
200         struct mlx5dr_rule_attr rule_attr = {
201                 .queue_id = queue,
202                 .user_data = user_data,
203                 .burst = attr->postpone,
204         };
205         struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
206         struct mlx5_hw_actions *hw_acts;
207         struct rte_flow_hw *flow;
208         struct mlx5_hw_q_job *job;
209         uint32_t acts_num, flow_idx;
210         int ret;
211
212         if (unlikely(!priv->hw_q[queue].job_idx)) {
213                 rte_errno = ENOMEM;
214                 goto error;
215         }
216         flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
217         if (!flow)
218                 goto error;
219         /*
220          * Set the table here in order to know the destination table
221          * when freeing the flow afterwards.
222          */
223         flow->table = table;
224         flow->idx = flow_idx;
225         job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
226         /*
227          * Set the job type here in order to know if the flow memory
228          * should be freed or not when getting the result from dequeue.
229          */
230         job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
231         job->flow = flow;
232         job->user_data = user_data;
233         rule_attr.user_data = job;
234         hw_acts = &table->ats[action_template_index].acts;
235         /* Construct the flow action array based on the input actions. */
236         flow_hw_actions_construct(hw_acts, actions, rule_acts, &acts_num);
237         ret = mlx5dr_rule_create(table->matcher,
238                                  pattern_template_index, items,
239                                  rule_acts, acts_num,
240                                  &rule_attr, &flow->rule);
241         if (likely(!ret))
242                 return (struct rte_flow *)flow;
243         /* Flow creation failed; return the job descriptor and free the flow memory. */
244         mlx5_ipool_free(table->flow, flow_idx);
245         priv->hw_q[queue].job_idx++;
246 error:
247         rte_flow_error_set(error, rte_errno,
248                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
249                            "fail to create rte flow");
250         return NULL;
251 }
252
253 /**
254  * Enqueue HW steering flow destruction.
255  *
256  * The flow will be applied to the HW only if the postpone bit is not set or
257  * the extra push function is called.
258  * The flow destruction status should be checked from dequeue result.
259  *
260  * @param[in] dev
261  *   Pointer to the rte_eth_dev structure.
262  * @param[in] queue
263  *   The queue to destroy the flow.
264  * @param[in] attr
265  *   Pointer to the flow operation attributes.
266  * @param[in] flow
267  *   Pointer to the flow to be destroyed.
268  * @param[in] user_data
269  *   Pointer to the user_data.
270  * @param[out] error
271  *   Pointer to error structure.
272  *
273  * @return
274  *    0 on success, negative value otherwise and rte_errno is set.
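 *
 * A possible application-side sketch (queue 0 and the error handling are
 * assumptions; the result of the destruction still has to be pulled):
 *
 * @code{.c}
 *	struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *
 *	if (rte_flow_async_destroy(port_id, 0, &op_attr, flow, NULL, &err))
 *		return -rte_errno;
 *	// completion is reported later by rte_flow_pull() on the same queue
 * @endcode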
275  */
276 static int
277 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
278                            uint32_t queue,
279                            const struct rte_flow_op_attr *attr,
280                            struct rte_flow *flow,
281                            void *user_data,
282                            struct rte_flow_error *error)
283 {
284         struct mlx5_priv *priv = dev->data->dev_private;
285         struct mlx5dr_rule_attr rule_attr = {
286                 .queue_id = queue,
287                 .user_data = user_data,
288                 .burst = attr->postpone,
289         };
290         struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
291         struct mlx5_hw_q_job *job;
292         int ret;
293
294         if (unlikely(!priv->hw_q[queue].job_idx)) {
295                 rte_errno = ENOMEM;
296                 goto error;
297         }
298         job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
299         job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
300         job->user_data = user_data;
301         job->flow = fh;
302         rule_attr.user_data = job;
303         ret = mlx5dr_rule_destroy(&fh->rule, &rule_attr);
304         if (likely(!ret))
305                 return 0;
306         priv->hw_q[queue].job_idx++;
307 error:
308         return rte_flow_error_set(error, rte_errno,
309                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
310                         "fail to destroy rte flow");
311 }
312
313 /**
314  * Pull the enqueued flows.
315  *
316  * For flows enqueued from creation/destruction, the status should be
317  * checked from the dequeue result.
318  *
319  * @param[in] dev
320  *   Pointer to the rte_eth_dev structure.
321  * @param[in] queue
322  *   The queue to pull the result.
323  * @param[out] res
324  *   Array to save the results.
325  * @param[in] n_res
326  *   Maximum number of results that can be stored in the array.
327  * @param[out] error
328  *   Pointer to error structure.
329  *
330  * @return
331  *    Result number on success, negative value otherwise and rte_errno is set.
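 *
 * A sketch of how a caller might drain a queue; handle_failure() is a
 * hypothetical application callback, not part of this driver:
 *
 * @code{.c}
 *	struct rte_flow_op_result res[64];
 *	int n, i;
 *
 *	do {
 *		n = rte_flow_pull(port_id, queue, res, RTE_DIM(res), &err);
 *		for (i = 0; i < n; i++)
 *			if (res[i].status != RTE_FLOW_OP_SUCCESS)
 *				handle_failure(res[i].user_data);
 *	} while (n == (int)RTE_DIM(res));
 * @endcode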
332  */
333 static int
334 flow_hw_pull(struct rte_eth_dev *dev,
335              uint32_t queue,
336              struct rte_flow_op_result res[],
337              uint16_t n_res,
338              struct rte_flow_error *error)
339 {
340         struct mlx5_priv *priv = dev->data->dev_private;
341         struct mlx5_hw_q_job *job;
342         int ret, i;
343
344         ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
345         if (ret < 0)
346                 return rte_flow_error_set(error, rte_errno,
347                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
348                                 "fail to query flow queue");
349         for (i = 0; i < ret; i++) {
350                 job = (struct mlx5_hw_q_job *)res[i].user_data;
351                 /* Restore user data. */
352                 res[i].user_data = job->user_data;
353                 if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY)
354                         mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
355                 priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
356         }
357         return ret;
358 }
359
360 /**
361  * Push the enqueued flows to HW.
362  *
363  * Force apply all the enqueued flows to the HW.
364  *
365  * @param[in] dev
366  *   Pointer to the rte_eth_dev structure.
367  * @param[in] queue
368  *   The queue to push the flow.
369  * @param[out] error
370  *   Pointer to error structure.
371  *
372  * @return
373  *    0 on success, negative value otherwise and rte_errno is set.
374  */
375 static int
376 flow_hw_push(struct rte_eth_dev *dev,
377              uint32_t queue,
378              struct rte_flow_error *error)
379 {
380         struct mlx5_priv *priv = dev->data->dev_private;
381         int ret;
382
383         ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
384                                        MLX5DR_SEND_QUEUE_ACTION_DRAIN);
385         if (ret) {
386                 rte_flow_error_set(error, rte_errno,
387                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
388                                    "fail to push flows");
389                 return ret;
390         }
391         return 0;
392 }
393
394 /**
395  * Create flow table.
396  *
397  * The input item and action templates will be bound to the table.
398  * Flow memory will also be allocated. Matcher will be created based
399  * on the item template. Action will be translated to the dedicated
400  * DR action if possible.
401  *
402  * @param[in] dev
403  *   Pointer to the rte_eth_dev structure.
404  * @param[in] attr
405  *   Pointer to the table attributes.
406  * @param[in] item_templates
407  *   Item template array to be bound to the table.
408  * @param[in] nb_item_templates
409  *   Number of item templates.
410  * @param[in] action_templates
411  *   Action template array to be bound to the table.
412  * @param[in] nb_action_templates
413  *   Number of action templates.
414  * @param[out] error
415  *   Pointer to error structure.
416  *
417  * @return
418  *    Table on success, NULL otherwise and rte_errno is set.
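 *
 * A hedged example of creating such a table from the application side (the
 * template variables, group number and flow count are assumptions):
 *
 * @code{.c}
 *	struct rte_flow_template_table_attr tbl_attr = {
 *		.flow_attr = { .group = 1, .ingress = 1 },
 *		.nb_flows = 1 << 16,
 *	};
 *	struct rte_flow_template_table *tbl;
 *
 *	tbl = rte_flow_template_table_create(port_id, &tbl_attr,
 *					     &pattern_tmpl, 1,
 *					     &actions_tmpl, 1, &err);
 * @endcode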
419  */
420 static struct rte_flow_template_table *
421 flow_hw_table_create(struct rte_eth_dev *dev,
422                      const struct rte_flow_template_table_attr *attr,
423                      struct rte_flow_pattern_template *item_templates[],
424                      uint8_t nb_item_templates,
425                      struct rte_flow_actions_template *action_templates[],
426                      uint8_t nb_action_templates,
427                      struct rte_flow_error *error)
428 {
429         struct mlx5_priv *priv = dev->data->dev_private;
430         struct mlx5dr_matcher_attr matcher_attr = {0};
431         struct rte_flow_template_table *tbl = NULL;
432         struct mlx5_flow_group *grp;
433         struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
434         struct rte_flow_attr flow_attr = attr->flow_attr;
435         struct mlx5_flow_cb_ctx ctx = {
436                 .dev = dev,
437                 .error = error,
438                 .data = &flow_attr,
439         };
440         struct mlx5_indexed_pool_config cfg = {
441                 .size = sizeof(struct rte_flow_hw),
442                 .trunk_size = 1 << 12,
443                 .per_core_cache = 1 << 13,
444                 .need_lock = 1,
445                 .release_mem_en = !!priv->sh->config.reclaim_mode,
446                 .malloc = mlx5_malloc,
447                 .free = mlx5_free,
448                 .type = "mlx5_hw_table_flow",
449         };
450         struct mlx5_list_entry *ge;
451         uint32_t i, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
452         uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
453         int err;
454
455         /* The HWS layer accepts only one item template for the root table. */
456         if (!attr->flow_attr.group)
457                 max_tpl = 1;
458         cfg.max_idx = nb_flows;
459         /* For a table with very few flows, disable the per-core cache. */
460         if (nb_flows < cfg.trunk_size) {
461                 cfg.per_core_cache = 0;
462                 cfg.trunk_size = nb_flows;
463         }
464         /* Check if too many templates are requested. */
465         if (nb_item_templates > max_tpl ||
466             nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
467                 rte_errno = EINVAL;
468                 goto error;
469         }
470         /* Allocate the table memory. */
471         tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
472         if (!tbl)
473                 goto error;
474         /* Allocate flow indexed pool. */
475         tbl->flow = mlx5_ipool_create(&cfg);
476         if (!tbl->flow)
477                 goto error;
478         /* Register the flow group. */
479         ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
480         if (!ge)
481                 goto error;
482         grp = container_of(ge, struct mlx5_flow_group, entry);
483         tbl->grp = grp;
484         /* Prepare matcher information. */
485         matcher_attr.priority = attr->flow_attr.priority;
486         matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
487         matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
488         /* Build the item template. */
489         for (i = 0; i < nb_item_templates; i++) {
490                 uint32_t ret;
491
492                 ret = __atomic_add_fetch(&item_templates[i]->refcnt, 1,
493                                          __ATOMIC_RELAXED);
494                 if (ret <= 1) {
495                         rte_errno = EINVAL;
496                         goto it_error;
497                 }
498                 mt[i] = item_templates[i]->mt;
499                 tbl->its[i] = item_templates[i];
500         }
501         tbl->matcher = mlx5dr_matcher_create
502                 (tbl->grp->tbl, mt, nb_item_templates, &matcher_attr);
503         if (!tbl->matcher)
504                 goto it_error;
505         tbl->nb_item_templates = nb_item_templates;
506         /* Build the action template. */
507         for (i = 0; i < nb_action_templates; i++) {
508                 uint32_t ret;
509
510                 ret = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
511                                          __ATOMIC_RELAXED);
512                 if (ret <= 1) {
513                         rte_errno = EINVAL;
514                         goto at_error;
515                 }
516                 err = flow_hw_actions_translate(dev, attr,
517                                                 &tbl->ats[i].acts,
518                                                 action_templates[i], error);
519                 if (err) {
520                         i++;
521                         goto at_error;
522                 }
523                 tbl->ats[i].action_template = action_templates[i];
524         }
525         tbl->nb_action_templates = nb_action_templates;
526         tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
527                     (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
528                     MLX5DR_TABLE_TYPE_NIC_RX);
529         LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
530         return tbl;
531 at_error:
532         while (i--) {
533                 __flow_hw_action_template_destroy(&tbl->ats[i].acts);
534                 __atomic_sub_fetch(&action_templates[i]->refcnt,
535                                    1, __ATOMIC_RELAXED);
536         }
537         i = nb_item_templates;
538 it_error:
539         while (i--)
540                 __atomic_sub_fetch(&item_templates[i]->refcnt,
541                                    1, __ATOMIC_RELAXED);
542         mlx5dr_matcher_destroy(tbl->matcher);
543 error:
544         err = rte_errno;
545         if (tbl) {
546                 if (tbl->grp)
547                         mlx5_hlist_unregister(priv->sh->groups,
548                                               &tbl->grp->entry);
549                 if (tbl->flow)
550                         mlx5_ipool_destroy(tbl->flow);
551                 mlx5_free(tbl);
552         }
553         rte_flow_error_set(error, err,
554                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
555                           "fail to create rte table");
556         return NULL;
557 }
558
559 /**
560  * Destroy flow table.
561  *
562  * @param[in] dev
563  *   Pointer to the rte_eth_dev structure.
564  * @param[in] table
565  *   Pointer to the table to be destroyed.
566  * @param[out] error
567  *   Pointer to error structure.
568  *
569  * @return
570  *   0 on success, a negative errno value otherwise and rte_errno is set.
571  */
572 static int
573 flow_hw_table_destroy(struct rte_eth_dev *dev,
574                       struct rte_flow_template_table *table,
575                       struct rte_flow_error *error)
576 {
577         struct mlx5_priv *priv = dev->data->dev_private;
578         int i;
579
580         if (table->refcnt) {
581                 DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
582                 return rte_flow_error_set(error, EBUSY,
583                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
584                                    NULL,
585                                    "table still in use");
586         }
587         LIST_REMOVE(table, next);
588         for (i = 0; i < table->nb_item_templates; i++)
589                 __atomic_sub_fetch(&table->its[i]->refcnt,
590                                    1, __ATOMIC_RELAXED);
591         for (i = 0; i < table->nb_action_templates; i++) {
592                 __flow_hw_action_template_destroy(&table->ats[i].acts);
593                 __atomic_sub_fetch(&table->ats[i].action_template->refcnt,
594                                    1, __ATOMIC_RELAXED);
595         }
596         mlx5dr_matcher_destroy(table->matcher);
597         mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
598         mlx5_ipool_destroy(table->flow);
599         mlx5_free(table);
600         return 0;
601 }
602
603 /**
604  * Create flow action template.
605  *
606  * @param[in] dev
607  *   Pointer to the rte_eth_dev structure.
608  * @param[in] attr
609  *   Pointer to the action template attributes.
610  * @param[in] actions
611  *   Associated actions (list terminated by the END action).
612  * @param[in] masks
613  *   List of actions that marks which members of each action are constant.
614  * @param[out] error
615  *   Pointer to error structure.
616  *
617  * @return
618  *   Action template pointer on success, NULL otherwise and rte_errno is set.
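 *
 * A minimal sketch of an actions template using a fully masked DROP action
 * (one of the action types handled by this engine; port_id is assumed):
 *
 * @code{.c}
 *	const struct rte_flow_action acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
 *	struct rte_flow_actions_template *at;
 *
 *	// passing the same array as actions and masks marks everything constant
 *	at = rte_flow_actions_template_create(port_id, &at_attr,
 *					      acts, acts, &err);
 * @endcode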
619  */
620 static struct rte_flow_actions_template *
621 flow_hw_actions_template_create(struct rte_eth_dev *dev,
622                         const struct rte_flow_actions_template_attr *attr,
623                         const struct rte_flow_action actions[],
624                         const struct rte_flow_action masks[],
625                         struct rte_flow_error *error)
626 {
627         struct mlx5_priv *priv = dev->data->dev_private;
628         int len, act_len, mask_len, i;
629         struct rte_flow_actions_template *at;
630
631         act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
632                                 NULL, 0, actions, error);
633         if (act_len <= 0)
634                 return NULL;
635         len = RTE_ALIGN(act_len, 16);
636         mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
637                                  NULL, 0, masks, error);
638         if (mask_len <= 0)
639                 return NULL;
640         len += RTE_ALIGN(mask_len, 16);
641         at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at), 64, rte_socket_id());
642         if (!at) {
643                 rte_flow_error_set(error, ENOMEM,
644                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
645                                    NULL,
646                                    "cannot allocate action template");
647                 return NULL;
648         }
649         at->attr = *attr;
650         at->actions = (struct rte_flow_action *)(at + 1);
651         act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions, len,
652                                 actions, error);
653         if (act_len <= 0)
654                 goto error;
655         at->masks = (struct rte_flow_action *)
656                     (((uint8_t *)at->actions) + act_len);
657         mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
658                                  len - act_len, masks, error);
659         if (mask_len <= 0)
660                 goto error;
661         /*
662          * The mlx5 PMD stores the indirect action index directly in the action
663          * conf pointer, while rte_flow_conv() copies the content the conf pointer
664          * refers to. Restore the indirect action index from the original conf here.
665          */
666         for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
667              actions++, masks++, i++) {
668                 if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
669                         at->actions[i].conf = actions->conf;
670                         at->masks[i].conf = masks->conf;
671                 }
672         }
673         __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
674         LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
675         return at;
676 error:
677         mlx5_free(at);
678         return NULL;
679 }
680
681 /**
682  * Destroy flow action template.
683  *
684  * @param[in] dev
685  *   Pointer to the rte_eth_dev structure.
686  * @param[in] template
687  *   Pointer to the action template to be destroyed.
688  * @param[out] error
689  *   Pointer to error structure.
690  *
691  * @return
692  *   0 on success, a negative errno value otherwise and rte_errno is set.
693  */
694 static int
695 flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
696                                  struct rte_flow_actions_template *template,
697                                  struct rte_flow_error *error __rte_unused)
698 {
699         if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
700                 DRV_LOG(WARNING, "Action template %p is still in use.",
701                         (void *)template);
702                 return rte_flow_error_set(error, EBUSY,
703                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
704                                    NULL,
705                                    "action template still in use");
706         }
707         LIST_REMOVE(template, next);
708         mlx5_free(template);
709         return 0;
710 }
711
712 /**
713  * Create flow item template.
714  *
715  * @param[in] dev
716  *   Pointer to the rte_eth_dev structure.
717  * @param[in] attr
718  *   Pointer to the item template attributes.
719  * @param[in] items
720  *   The template item pattern.
721  * @param[out] error
722  *   Pointer to error structure.
723  *
724  * @return
725  *  Item template pointer on success, NULL otherwise and rte_errno is set.
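 *
 * A small illustration of creating a pattern template (the pattern, the
 * relaxed matching choice and port_id are assumptions):
 *
 * @code{.c}
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_pattern_template_attr pt_attr = {
 *		.relaxed_matching = 1,
 *		.ingress = 1,
 *	};
 *	struct rte_flow_pattern_template *pt;
 *
 *	pt = rte_flow_pattern_template_create(port_id, &pt_attr, pattern, &err);
 * @endcode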
726  */
727 static struct rte_flow_pattern_template *
728 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
729                              const struct rte_flow_pattern_template_attr *attr,
730                              const struct rte_flow_item items[],
731                              struct rte_flow_error *error)
732 {
733         struct mlx5_priv *priv = dev->data->dev_private;
734         struct rte_flow_pattern_template *it;
735
736         it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
737         if (!it) {
738                 rte_flow_error_set(error, ENOMEM,
739                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
740                                    NULL,
741                                    "cannot allocate item template");
742                 return NULL;
743         }
744         it->attr = *attr;
745         it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
746         if (!it->mt) {
747                 mlx5_free(it);
748                 rte_flow_error_set(error, rte_errno,
749                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
750                                    NULL,
751                                    "cannot create match template");
752                 return NULL;
753         }
754         __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
755         LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
756         return it;
757 }
758
759 /**
760  * Destroy flow item template.
761  *
762  * @param[in] dev
763  *   Pointer to the rte_eth_dev structure.
764  * @param[in] template
765  *   Pointer to the item template to be destroyed.
766  * @param[out] error
767  *   Pointer to error structure.
768  *
769  * @return
770  *   0 on success, a negative errno value otherwise and rte_errno is set.
771  */
772 static int
773 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
774                               struct rte_flow_pattern_template *template,
775                               struct rte_flow_error *error __rte_unused)
776 {
777         if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
778                 DRV_LOG(WARNING, "Item template %p is still in use.",
779                         (void *)template);
780                 return rte_flow_error_set(error, EBUSY,
781                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
782                                    NULL,
783                                    "item template still in use");
784         }
785         LIST_REMOVE(template, next);
786         claim_zero(mlx5dr_match_template_destroy(template->mt));
787         mlx5_free(template);
788         return 0;
789 }
790
791 /**
792  * Get information about HWS pre-configurable resources.
793  *
794  * @param[in] dev
795  *   Pointer to the rte_eth_dev structure.
796  * @param[out] port_info
797  *   Pointer to port information.
798  * @param[out] queue_info
799  *   Pointer to queue information.
800  * @param[out] error
801  *   Pointer to error structure.
802  *
803  * @return
804  *   0 on success, a negative errno value otherwise and rte_errno is set.
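 *
 * Typically used to size the queues before rte_flow_configure(), e.g.
 * (sketch only; port_id and queue_size are assumed application variables):
 *
 * @code{.c}
 *	struct rte_flow_port_info pinfo;
 *	struct rte_flow_queue_info qinfo;
 *
 *	if (rte_flow_info_get(port_id, &pinfo, &qinfo, &err) == 0)
 *		queue_size = RTE_MIN(queue_size, qinfo.max_size);
 * @endcode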
805  */
806 static int
807 flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
808                  struct rte_flow_port_info *port_info,
809                  struct rte_flow_queue_info *queue_info,
810                  struct rte_flow_error *error __rte_unused)
811 {
812         /* Nothing to be updated currently. */
813         memset(port_info, 0, sizeof(*port_info));
814         /* Queue size is unlimited from low-level. */
815         queue_info->max_size = UINT32_MAX;
816         return 0;
817 }
818
819 /**
820  * Create group callback.
821  *
822  * @param[in] tool_ctx
823  *   Pointer to the hash list related context.
824  * @param[in] cb_ctx
825  *   Pointer to the group creation context.
826  *
827  * @return
828  *   Group entry on success, NULL otherwise and rte_errno is set.
829  */
830 struct mlx5_list_entry *
831 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
832 {
833         struct mlx5_dev_ctx_shared *sh = tool_ctx;
834         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
835         struct rte_eth_dev *dev = ctx->dev;
836         struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
837         struct mlx5_priv *priv = dev->data->dev_private;
838         struct mlx5dr_table_attr dr_tbl_attr = {0};
839         struct rte_flow_error *error = ctx->error;
840         struct mlx5_flow_group *grp_data;
841         struct mlx5dr_table *tbl = NULL;
842         struct mlx5dr_action *jump;
843         uint32_t idx = 0;
844
845         grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
846         if (!grp_data) {
847                 rte_flow_error_set(error, ENOMEM,
848                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
849                                    NULL,
850                                    "cannot allocate flow table data entry");
851                 return NULL;
852         }
853         dr_tbl_attr.level = attr->group;
854         if (attr->transfer)
855                 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
856         else if (attr->egress)
857                 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
858         else
859                 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
860         tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
861         if (!tbl)
862                 goto error;
863         grp_data->tbl = tbl;
864         if (attr->group) {
865                 /* Jump action to be used by non-root tables. */
866                 jump = mlx5dr_action_create_dest_table
867                         (priv->dr_ctx, tbl,
868                          mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
869                 if (!jump)
870                         goto error;
871                 grp_data->jump.hws_action = jump;
872                 /* Jump action to be used by the root table. */
873                 jump = mlx5dr_action_create_dest_table
874                         (priv->dr_ctx, tbl,
875                          mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
876                                          [dr_tbl_attr.type]);
877                 if (!jump)
878                         goto error;
879                 grp_data->jump.root_action = jump;
880         }
881         grp_data->idx = idx;
882         grp_data->group_id = attr->group;
883         grp_data->type = dr_tbl_attr.type;
884         return &grp_data->entry;
885 error:
886         if (grp_data->jump.root_action)
887                 mlx5dr_action_destroy(grp_data->jump.root_action);
888         if (grp_data->jump.hws_action)
889                 mlx5dr_action_destroy(grp_data->jump.hws_action);
890         if (tbl)
891                 mlx5dr_table_destroy(tbl);
892         if (idx)
893                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
894         rte_flow_error_set(error, ENOMEM,
895                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
896                            NULL,
897                            "cannot allocate flow dr table");
898         return NULL;
899 }
900
901 /**
902  * Remove group callback.
903  *
904  * @param[in] tool_ctx
905  *   Pointer to the hash list related context.
906  * @param[in] entry
907  *   Pointer to the entry to be removed.
908  */
909 void
910 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
911 {
912         struct mlx5_dev_ctx_shared *sh = tool_ctx;
913         struct mlx5_flow_group *grp_data =
914                     container_of(entry, struct mlx5_flow_group, entry);
915
916         MLX5_ASSERT(entry && sh);
917         /* To use the wrapper glue functions instead. */
918         if (grp_data->jump.hws_action)
919                 mlx5dr_action_destroy(grp_data->jump.hws_action);
920         if (grp_data->jump.root_action)
921                 mlx5dr_action_destroy(grp_data->jump.root_action);
922         mlx5dr_table_destroy(grp_data->tbl);
923         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
924 }
925
926 /**
927  * Match group callback.
928  *
929  * @param[in] tool_ctx
930  *   Pointer to the hash list related context.
931  * @param[in] entry
932  *   Pointer to the group to be matched.
933  * @param[in] cb_ctx
934  *   Pointer to the group matching context.
935  *
936  * @return
937  *   0 on match, non-zero otherwise.
938  */
939 int
940 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
941                      void *cb_ctx)
942 {
943         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
944         struct mlx5_flow_group *grp_data =
945                 container_of(entry, struct mlx5_flow_group, entry);
946         struct rte_flow_attr *attr =
947                         (struct rte_flow_attr *)ctx->data;
948
949         return (grp_data->group_id != attr->group) ||
950                 ((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
951                 attr->transfer) ||
952                 ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
953                 attr->egress) ||
954                 ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
955                 attr->ingress);
956 }
957
958 /**
959  * Clone group entry callback.
960  *
961  * @param[in] tool_ctx
962  *   Pointer to the hash list related context.
963  * @param[in] oentry
964  *   Pointer to the original group entry to be cloned.
965  * @param[in] cb_ctx
966  *   Pointer to the group creation context.
967  *
968  * @return
969  *   Cloned group entry on success, NULL otherwise and rte_errno is set.
970  */
971 struct mlx5_list_entry *
972 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
973                      void *cb_ctx)
974 {
975         struct mlx5_dev_ctx_shared *sh = tool_ctx;
976         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
977         struct mlx5_flow_group *grp_data;
978         struct rte_flow_error *error = ctx->error;
979         uint32_t idx = 0;
980
981         grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
982         if (!grp_data) {
983                 rte_flow_error_set(error, ENOMEM,
984                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
985                                    NULL,
986                                    "cannot allocate flow table data entry");
987                 return NULL;
988         }
989         memcpy(grp_data, oentry, sizeof(*grp_data));
990         grp_data->idx = idx;
991         return &grp_data->entry;
992 }
993
994 /**
995  * Free cloned group entry callback.
996  *
997  * @param[in] tool_ctx
998  *   Pointer to the hash list related context.
999  * @param[in] entry
1000  *   Pointer to the group to be freed.
1001  */
1002 void
1003 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
1004 {
1005         struct mlx5_dev_ctx_shared *sh = tool_ctx;
1006         struct mlx5_flow_group *grp_data =
1007                     container_of(entry, struct mlx5_flow_group, entry);
1008
1009         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
1010 }
1011
1012 /**
1013  * Configure port HWS resources.
1014  *
1015  * @param[in] dev
1016  *   Pointer to the rte_eth_dev structure.
1017  * @param[in] port_attr
1018  *   Port configuration attributes.
1019  * @param[in] nb_queue
1020  *   Number of flow queues to be configured.
1021  * @param[in] queue_attr
1022  *   Array that holds attributes for each flow queue.
1023  * @param[out] error
1024  *   Pointer to error structure.
1025  *
1026  * @return
1027  *   0 on success, a negative errno value otherwise and rte_errno is set.
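 *
 * A hedged configuration example with two equally sized queues (queue count
 * and size are arbitrary illustrations; port_id and err are assumed). Note
 * that this driver requires all queue sizes to be equal:
 *
 * @code{.c}
 *	const struct rte_flow_port_attr port_attr = { 0 };
 *	const struct rte_flow_queue_attr q_attr = { .size = 128 };
 *	const struct rte_flow_queue_attr *q_attrs[] = { &q_attr, &q_attr };
 *
 *	if (rte_flow_configure(port_id, &port_attr, 2, q_attrs, &err))
 *		return -rte_errno;
 * @endcode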
1028  */
1030 static int
1031 flow_hw_configure(struct rte_eth_dev *dev,
1032                   const struct rte_flow_port_attr *port_attr,
1033                   uint16_t nb_queue,
1034                   const struct rte_flow_queue_attr *queue_attr[],
1035                   struct rte_flow_error *error)
1036 {
1037         struct mlx5_priv *priv = dev->data->dev_private;
1038         struct mlx5dr_context *dr_ctx = NULL;
1039         struct mlx5dr_context_attr dr_ctx_attr = {0};
1040         struct mlx5_hw_q *hw_q;
1041         struct mlx5_hw_q_job *job = NULL;
1042         uint32_t mem_size, i, j;
1043
1044         if (!port_attr || !nb_queue || !queue_attr) {
1045                 rte_errno = EINVAL;
1046                 goto err;
1047         }
1048         /* In case of re-configuration, release the existing context first. */
1049         if (priv->dr_ctx) {
1051                 for (i = 0; i < nb_queue; i++) {
1052                         hw_q = &priv->hw_q[i];
1053                         /* Make sure all queues are empty. */
1054                         if (hw_q->size != hw_q->job_idx) {
1055                                 rte_errno = EBUSY;
1056                                 goto err;
1057                         }
1058                 }
1059                 flow_hw_resource_release(dev);
1060         }
1061         /* Allocate the queue job descriptor LIFO. */
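        /*
         * The single allocation below lays out, back to back:
         *   [hw_q[0..nb_queue-1]]
         *   [queue 0 job pointer array][queue 0 job structs]
         *   [queue 1 job pointer array][queue 1 job structs] ...
         * so every queue's job LIFO is addressable without extra allocations.
         */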
1062         mem_size = sizeof(priv->hw_q[0]) * nb_queue;
1063         for (i = 0; i < nb_queue; i++) {
1064                 /*
1065                  * Check that all queue sizes are the same, as required by
1066                  * the HWS layer.
1067                  */
1068                 if (queue_attr[i]->size != queue_attr[0]->size) {
1069                         rte_errno = EINVAL;
1070                         goto err;
1071                 }
1072                 mem_size += (sizeof(struct mlx5_hw_q_job *) +
1073                             sizeof(struct mlx5_hw_q_job)) *
1074                             queue_attr[0]->size;
1075         }
1076         priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
1077                                  64, SOCKET_ID_ANY);
1078         if (!priv->hw_q) {
1079                 rte_errno = ENOMEM;
1080                 goto err;
1081         }
1082         for (i = 0; i < nb_queue; i++) {
1083                 priv->hw_q[i].job_idx = queue_attr[i]->size;
1084                 priv->hw_q[i].size = queue_attr[i]->size;
1085                 if (i == 0)
1086                         priv->hw_q[i].job = (struct mlx5_hw_q_job **)
1087                                             &priv->hw_q[nb_queue];
1088                 else
1089                         priv->hw_q[i].job = (struct mlx5_hw_q_job **)
1090                                             &job[queue_attr[i - 1]->size];
1091                 job = (struct mlx5_hw_q_job *)
1092                       &priv->hw_q[i].job[queue_attr[i]->size];
1093                 for (j = 0; j < queue_attr[i]->size; j++)
1094                         priv->hw_q[i].job[j] = &job[j];
1095         }
1096         dr_ctx_attr.pd = priv->sh->cdev->pd;
1097         dr_ctx_attr.queues = nb_queue;
1098         /* Queue sizes should all be the same. Take the first one. */
1099         dr_ctx_attr.queue_size = queue_attr[0]->size;
1100         dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
1101         /* rte_errno has been updated by HWS layer. */
1102         if (!dr_ctx)
1103                 goto err;
1104         priv->dr_ctx = dr_ctx;
1105         priv->nb_queue = nb_queue;
1106         /* Add global actions. */
1107         for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1108                 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1109                         priv->hw_drop[i][j] = mlx5dr_action_create_dest_drop
1110                                 (priv->dr_ctx, mlx5_hw_act_flag[i][j]);
1111                         if (!priv->hw_drop[i][j])
1112                                 goto err;
1113                 }
1114         }
1115         return 0;
1116 err:
1117         for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1118                 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1119                         if (!priv->hw_drop[i][j])
1120                                 continue;
1121                         mlx5dr_action_destroy(priv->hw_drop[i][j]);
1122                 }
1123         }
1124         if (dr_ctx)
1125                 claim_zero(mlx5dr_context_close(dr_ctx));
1126         mlx5_free(priv->hw_q);
1127         priv->hw_q = NULL;
1128         return rte_flow_error_set(error, rte_errno,
1129                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1130                                   "fail to configure port");
1131 }
1132
1133 /**
1134  * Release HWS resources.
1135  *
1136  * @param[in] dev
1137  *   Pointer to the rte_eth_dev structure.
1138  */
1139 void
1140 flow_hw_resource_release(struct rte_eth_dev *dev)
1141 {
1142         struct mlx5_priv *priv = dev->data->dev_private;
1143         struct rte_flow_template_table *tbl;
1144         struct rte_flow_pattern_template *it;
1145         struct rte_flow_actions_template *at;
1146         int i, j;
1147
1148         if (!priv->dr_ctx)
1149                 return;
1150         while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
1151                 tbl = LIST_FIRST(&priv->flow_hw_tbl);
1152                 flow_hw_table_destroy(dev, tbl, NULL);
1153         }
1154         while (!LIST_EMPTY(&priv->flow_hw_itt)) {
1155                 it = LIST_FIRST(&priv->flow_hw_itt);
1156                 flow_hw_pattern_template_destroy(dev, it, NULL);
1157         }
1158         while (!LIST_EMPTY(&priv->flow_hw_at)) {
1159                 at = LIST_FIRST(&priv->flow_hw_at);
1160                 flow_hw_actions_template_destroy(dev, at, NULL);
1161         }
1162         for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1163                 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1164                         if (!priv->hw_drop[i][j])
1165                                 continue;
1166                         mlx5dr_action_destroy(priv->hw_drop[i][j]);
1167                 }
1168         }
1169         mlx5_free(priv->hw_q);
1170         priv->hw_q = NULL;
1171         claim_zero(mlx5dr_context_close(priv->dr_ctx));
1172         priv->dr_ctx = NULL;
1173         priv->nb_queue = 0;
1174 }
1175
1176 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
1177         .info_get = flow_hw_info_get,
1178         .configure = flow_hw_configure,
1179         .pattern_template_create = flow_hw_pattern_template_create,
1180         .pattern_template_destroy = flow_hw_pattern_template_destroy,
1181         .actions_template_create = flow_hw_actions_template_create,
1182         .actions_template_destroy = flow_hw_actions_template_destroy,
1183         .template_table_create = flow_hw_table_create,
1184         .template_table_destroy = flow_hw_table_destroy,
1185         .async_flow_create = flow_hw_async_flow_create,
1186         .async_flow_destroy = flow_hw_async_flow_destroy,
1187         .pull = flow_hw_pull,
1188         .push = flow_hw_push,
1189 };
1190
1191 #endif