net/mlx5: add flow jump action
drivers/net/mlx5/mlx5_flow_hw.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4
5 #include <rte_flow.h>
6
7 #include <mlx5_malloc.h>
8 #include "mlx5_defs.h"
9 #include "mlx5_flow.h"
10
11 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
12
13 /* The maximum number of actions supported in a flow. */
14 #define MLX5_HW_MAX_ACTS 16
15
16 /* Default push burst threshold. */
17 #define BURST_THR 32u
18
19 /* Default queue to flush the flows. */
20 #define MLX5_DEFAULT_FLUSH_QUEUE 0
21
22 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
23
24 /* DR action flags, indexed by [root/non-root table][table type]. */
25 static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
26                                 [MLX5DR_TABLE_TYPE_MAX] = {
27         {
28                 MLX5DR_ACTION_FLAG_ROOT_RX,
29                 MLX5DR_ACTION_FLAG_ROOT_TX,
30                 MLX5DR_ACTION_FLAG_ROOT_FDB,
31         },
32         {
33                 MLX5DR_ACTION_FLAG_HWS_RX,
34                 MLX5DR_ACTION_FLAG_HWS_TX,
35                 MLX5DR_ACTION_FLAG_HWS_FDB,
36         },
37 };
38
39 /**
40  * Register destination table DR jump action.
41  *
42  * @param[in] dev
43  *   Pointer to the rte_eth_dev structure.
44  * @param[in] attr
45  *   Pointer to the flow attributes.
46  * @param[in] dest_group
47  *   The destination group ID.
48  * @param[out] error
49  *   Pointer to error structure.
50  *
51  * @return
52  *    Jump action on success, NULL otherwise and rte_errno is set.
53  */
54 static struct mlx5_hw_jump_action *
55 flow_hw_jump_action_register(struct rte_eth_dev *dev,
56                              const struct rte_flow_attr *attr,
57                              uint32_t dest_group,
58                              struct rte_flow_error *error)
59 {
60         struct mlx5_priv *priv = dev->data->dev_private;
61         struct rte_flow_attr jattr = *attr;
62         struct mlx5_flow_group *grp;
63         struct mlx5_flow_cb_ctx ctx = {
64                 .dev = dev,
65                 .error = error,
66                 .data = &jattr,
67         };
68         struct mlx5_list_entry *ge;
69
70         jattr.group = dest_group;
71         ge = mlx5_hlist_register(priv->sh->flow_tbls, dest_group, &ctx);
72         if (!ge)
73                 return NULL;
74         grp = container_of(ge, struct mlx5_flow_group, entry);
75         return &grp->jump;
76 }
77
78 /**
79  * Release jump action.
80  *
81  * @param[in] dev
82  *   Pointer to the rte_eth_dev structure.
83  * @param[in] jump
84  *   Pointer to the jump action.
85  */
87 static void
88 flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
89 {
90         struct mlx5_priv *priv = dev->data->dev_private;
91         struct mlx5_flow_group *grp;
92
93         grp = container_of
94                 (jump, struct mlx5_flow_group, jump);
95         mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
96 }
97
98 /**
99  * Destroy DR actions created by action template.
100  *
101  * DR actions are created during the table creation's action translation
102  * and need to be destroyed when the table is destroyed.
103  *
104  * @param[in] dev
105  *   Pointer to the rte_eth_dev structure.
106  * @param[in] acts
107  *   Pointer to the template HW steering DR actions.
108  */
109 static void
110 __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
111                                  struct mlx5_hw_actions *acts)
112 {
113         struct mlx5_priv *priv = dev->data->dev_private;
114
115         if (acts->jump) {
116                 struct mlx5_flow_group *grp;
117
118                 grp = container_of
119                         (acts->jump, struct mlx5_flow_group, jump);
120                 mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
121                 acts->jump = NULL;
122         }
123 }
124
125 /**
126  * Allocate a dynamic action construct data entry.
127  *
128  * @param[in] priv
129  *   Pointer to the port private data structure.
132  * @param[in] type
133  *   Action type.
134  * @param[in] action_src
135  *   Offset of source rte flow action.
136  * @param[in] action_dst
137  *   Offset of destination DR action.
138  *
139  * @return
140  *    Pointer on success, NULL otherwise and rte_errno is set.
141  */
142 static __rte_always_inline struct mlx5_action_construct_data *
143 __flow_hw_act_data_alloc(struct mlx5_priv *priv,
144                          enum rte_flow_action_type type,
145                          uint16_t action_src,
146                          uint16_t action_dst)
147 {
148         struct mlx5_action_construct_data *act_data;
149         uint32_t idx = 0;
150
151         act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
152         if (!act_data)
153                 return NULL;
154         act_data->idx = idx;
155         act_data->type = type;
156         act_data->action_src = action_src;
157         act_data->action_dst = action_dst;
158         return act_data;
159 }
160
161 /**
162  * Append dynamic action to the dynamic action list.
163  *
164  * @param[in] priv
165  *   Pointer to the port private data structure.
166  * @param[in] acts
167  *   Pointer to the template HW steering DR actions.
168  * @param[in] type
169  *   Action type.
170  * @param[in] action_src
171  *   Offset of source rte flow action.
172  * @param[in] action_dst
173  *   Offset of destination DR action.
174  *
175  * @return
176  *    0 on success, negative value otherwise and rte_errno is set.
177  */
178 static __rte_always_inline int
179 __flow_hw_act_data_general_append(struct mlx5_priv *priv,
180                                   struct mlx5_hw_actions *acts,
181                                   enum rte_flow_action_type type,
182                                   uint16_t action_src,
183                                   uint16_t action_dst)
184 {
185         struct mlx5_action_construct_data *act_data;

186         act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
187         if (!act_data)
188                 return -1;
189         LIST_INSERT_HEAD(&acts->act_list, act_data, next);
190         return 0;
191 }
192
193 /**
194  * Translate rte_flow actions to DR action.
195  *
196  * The action template has already indicated the actions. Translate the
197  * rte_flow actions to DR actions where possible, so the flow creation
198  * stage saves the cycles otherwise spent organizing the actions. Actions
199  * with incomplete information at this stage are added to a list and
200  * constructed at flow creation time.
201  *
202  * @param[in] dev
203  *   Pointer to the rte_eth_dev structure.
204  * @param[in] table_attr
205  *   Pointer to the table attributes.
208  * @param[in,out] acts
209  *   Pointer to the template HW steering DR actions.
210  * @param[in] at
211  *   Action template.
212  * @param[out] error
213  *   Pointer to error structure.
214  *
215  * @return
216  *    0 on success, negative value otherwise and rte_errno is set.
217  */
218 static int
219 flow_hw_actions_translate(struct rte_eth_dev *dev,
220                           const struct rte_flow_template_table_attr *table_attr,
221                           struct mlx5_hw_actions *acts,
222                           struct rte_flow_actions_template *at,
223                           struct rte_flow_error *error)
224 {
225         struct mlx5_priv *priv = dev->data->dev_private;
226         const struct rte_flow_attr *attr = &table_attr->flow_attr;
227         struct rte_flow_action *actions = at->actions;
228         struct rte_flow_action *action_start = actions;
229         struct rte_flow_action *masks = at->masks;
230         bool actions_end = false;
231         uint32_t type, i;
232         int err;
233
234         if (attr->transfer)
235                 type = MLX5DR_TABLE_TYPE_FDB;
236         else if (attr->egress)
237                 type = MLX5DR_TABLE_TYPE_NIC_TX;
238         else
239                 type = MLX5DR_TABLE_TYPE_NIC_RX;
240         for (i = 0; !actions_end; actions++, masks++) {
241                 switch (actions->type) {
242                 case RTE_FLOW_ACTION_TYPE_INDIRECT:
243                         break;
244                 case RTE_FLOW_ACTION_TYPE_VOID:
245                         break;
246                 case RTE_FLOW_ACTION_TYPE_DROP:
247                         acts->rule_acts[i++].action =
248                                 priv->hw_drop[!!attr->group][type];
249                         break;
250                 case RTE_FLOW_ACTION_TYPE_JUMP:
251                         if (masks->conf) {
252                                 uint32_t jump_group =
253                                         ((const struct rte_flow_action_jump *)
254                                         actions->conf)->group;
255                                 acts->jump = flow_hw_jump_action_register
256                                                 (dev, attr, jump_group, error);
257                                 if (!acts->jump)
258                                         goto err;
259                                 acts->rule_acts[i].action = (!!attr->group) ?
260                                                 acts->jump->hws_action :
261                                                 acts->jump->root_action;
262                         } else if (__flow_hw_act_data_general_append
263                                         (priv, acts, actions->type,
264                                          actions - action_start, i)){
265                                 goto err;
266                         }
267                         i++;
268                         break;
269                 case RTE_FLOW_ACTION_TYPE_END:
270                         actions_end = true;
271                         break;
272                 default:
273                         break;
274                 }
275         }
276         acts->acts_num = i;
277         return 0;
278 err:
279         err = rte_errno;
280         __flow_hw_action_template_destroy(dev, acts);
281         return rte_flow_error_set(error, err,
282                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
283                                   "fail to create rte table");
284 }
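
/*
 * Illustrative sketch, not part of the driver: how the mask array of an
 * actions template decides whether a JUMP is translated here or deferred
 * to flow creation. The group value 3 and the array names are
 * hypothetical.
 *
 *   struct rte_flow_action_jump jump_conf = { .group = 3 };
 *   struct rte_flow_action tmpl_actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump_conf },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action tmpl_masks[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump_conf },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 * A non-NULL mask conf marks the destination group as constant, so the DR
 * jump action is registered once at table creation. A NULL mask conf makes
 * the JUMP dynamic: it is appended to acts->act_list and resolved per flow
 * in flow_hw_actions_construct().
 */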
285
286 /**
287  * Construct flow action array.
288  *
289  * For an action template that contains dynamic actions, these actions
290  * need to be updated according to the rte_flow actions at flow creation.
291  *
292  * @param[in] dev
293  *   Pointer to the rte_eth_dev structure.
294  * @param[in] job
295  *   Pointer to job descriptor.
296  * @param[in] hw_acts
297  *   Pointer to translated actions from template.
298  * @param[in] actions
299  *   Array of rte_flow actions to be checked.
300  * @param[out] rule_acts
301  *   Array of DR rule actions to be filled for flow creation.
302  * @param[out] acts_num
303  *   Pointer to the number of actions the flow actually has.
304  *
305  * @return
306  *    0 on success, negative value otherwise and rte_errno is set.
307  */
308 static __rte_always_inline int
309 flow_hw_actions_construct(struct rte_eth_dev *dev,
310                           struct mlx5_hw_q_job *job,
311                           struct mlx5_hw_actions *hw_acts,
312                           const struct rte_flow_action actions[],
313                           struct mlx5dr_rule_action *rule_acts,
314                           uint32_t *acts_num)
315 {
316         struct rte_flow_template_table *table = job->flow->table;
317         struct mlx5_action_construct_data *act_data;
318         const struct rte_flow_action *action;
319         struct rte_flow_attr attr = {
320                         .ingress = 1,
321         };
322
323         memcpy(rule_acts, hw_acts->rule_acts,
324                sizeof(*rule_acts) * hw_acts->acts_num);
325         *acts_num = hw_acts->acts_num;
326         if (LIST_EMPTY(&hw_acts->act_list))
327                 return 0;
328         attr.group = table->grp->group_id;
329         if (table->type == MLX5DR_TABLE_TYPE_FDB) {
330                 attr.transfer = 1;
331                 attr.ingress = 1;
332         } else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
333                 attr.egress = 1;
334                 attr.ingress = 0;
335         } else {
336                 attr.ingress = 1;
337         }
338         LIST_FOREACH(act_data, &hw_acts->act_list, next) {
339                 uint32_t jump_group;
340                 struct mlx5_hw_jump_action *jump;
341
342                 action = &actions[act_data->action_src];
343                 MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||
344                             (int)action->type == act_data->type);
345                 switch (action->type) {
346                 case RTE_FLOW_ACTION_TYPE_INDIRECT:
347                         break;
348                 case RTE_FLOW_ACTION_TYPE_VOID:
349                         break;
350                 case RTE_FLOW_ACTION_TYPE_JUMP:
351                         jump_group = ((const struct rte_flow_action_jump *)
352                                                 action->conf)->group;
353                         jump = flow_hw_jump_action_register
354                                 (dev, &attr, jump_group, NULL);
355                         if (!jump)
356                                 return -1;
357                         rule_acts[act_data->action_dst].action =
358                         (!!attr.group) ? jump->hws_action : jump->root_action;
359                         job->flow->jump = jump;
360                         job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
361                         break;
362                 default:
363                         break;
364                 }
365         }
366         return 0;
367 }
368
369 /**
370  * Enqueue HW steering flow creation.
371  *
372  * The flow will be applied to the HW only if the postpone bit is not set or
373  * the extra push function is called.
374  * The flow creation status should be checked from dequeue result.
375  *
376  * @param[in] dev
377  *   Pointer to the rte_eth_dev structure.
378  * @param[in] queue
379  *   The queue to create the flow.
380  * @param[in] attr
381  *   Pointer to the flow operation attributes.
382  * @param[in] items
383  *   Items with flow spec value.
384  * @param[in] pattern_template_index
385  *   Index of the pattern template to use from the table.
386  * @param[in] actions
387  *   Action with flow spec value.
388  * @param[in] action_template_index
389  *   Index of the action template to use from the table.
390  * @param[in] user_data
391  *   Pointer to the user_data.
392  * @param[out] error
393  *   Pointer to error structure.
394  *
395  * @return
396  *    Flow pointer on success, NULL otherwise and rte_errno is set.
397  */
398 static struct rte_flow *
399 flow_hw_async_flow_create(struct rte_eth_dev *dev,
400                           uint32_t queue,
401                           const struct rte_flow_op_attr *attr,
402                           struct rte_flow_template_table *table,
403                           const struct rte_flow_item items[],
404                           uint8_t pattern_template_index,
405                           const struct rte_flow_action actions[],
406                           uint8_t action_template_index,
407                           void *user_data,
408                           struct rte_flow_error *error)
409 {
410         struct mlx5_priv *priv = dev->data->dev_private;
411         struct mlx5dr_rule_attr rule_attr = {
412                 .queue_id = queue,
413                 .user_data = user_data,
414                 .burst = attr->postpone,
415         };
416         struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
417         struct mlx5_hw_actions *hw_acts;
418         struct rte_flow_hw *flow;
419         struct mlx5_hw_q_job *job;
420         uint32_t acts_num, flow_idx;
421         int ret;
422
423         if (unlikely(!priv->hw_q[queue].job_idx)) {
424                 rte_errno = ENOMEM;
425                 goto error;
426         }
427         flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
428         if (!flow)
429                 goto error;
430         /*
431          * Set the table here in order to know the destination table
432          * when free the flow afterwards.
433          */
434         flow->table = table;
435         flow->idx = flow_idx;
436         job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
437         /*
438          * Set the job type here in order to know if the flow memory
439          * should be freed or not when get the result from dequeue.
440          */
441         job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
442         job->flow = flow;
443         job->user_data = user_data;
444         rule_attr.user_data = job;
445         hw_acts = &table->ats[action_template_index].acts;
446         /* Construct the flow action array based on the input actions. */
447         flow_hw_actions_construct(dev, job, hw_acts, actions,
448                                   rule_acts, &acts_num);
449         ret = mlx5dr_rule_create(table->matcher,
450                                  pattern_template_index, items,
451                                  rule_acts, acts_num,
452                                  &rule_attr, &flow->rule);
453         if (likely(!ret))
454                 return (struct rte_flow *)flow;
455         /* Flow creation failed, return the descriptor and flow memory. */
456         mlx5_ipool_free(table->flow, flow_idx);
457         priv->hw_q[queue].job_idx++;
458 error:
459         rte_flow_error_set(error, rte_errno,
460                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
461                            "fail to create rte flow");
462         return NULL;
463 }
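
/*
 * Illustrative application-side sketch, not part of the driver: enqueue a
 * flow on queue 0 of an already created template table, push it to the
 * hardware and poll for the completion. The port_id, table, pattern and
 * actions variables are assumed to exist.
 *
 *   struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *   struct rte_flow_op_result res[1];
 *   struct rte_flow_error err;
 *   struct rte_flow *f;
 *
 *   f = rte_flow_async_create(port_id, 0, &op_attr, table,
 *                             pattern, 0, actions, 0, NULL, &err);
 *   if (f != NULL) {
 *       rte_flow_push(port_id, 0, &err);
 *       while (rte_flow_pull(port_id, 0, res, 1, &err) == 0)
 *           ; // keep polling until the creation result is returned
 *   }
 */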
464
465 /**
466  * Enqueue HW steering flow destruction.
467  *
468  * The flow will be applied to the HW only if the postpone bit is not set or
469  * the extra push function is called.
470  * The flow destruction status should be checked from dequeue result.
471  *
472  * @param[in] dev
473  *   Pointer to the rte_eth_dev structure.
474  * @param[in] queue
475  *   The queue to destroy the flow.
476  * @param[in] attr
477  *   Pointer to the flow operation attributes.
478  * @param[in] flow
479  *   Pointer to the flow to be destroyed.
480  * @param[in] user_data
481  *   Pointer to the user_data.
482  * @param[out] error
483  *   Pointer to error structure.
484  *
485  * @return
486  *    0 on success, negative value otherwise and rte_errno is set.
487  */
488 static int
489 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
490                            uint32_t queue,
491                            const struct rte_flow_op_attr *attr,
492                            struct rte_flow *flow,
493                            void *user_data,
494                            struct rte_flow_error *error)
495 {
496         struct mlx5_priv *priv = dev->data->dev_private;
497         struct mlx5dr_rule_attr rule_attr = {
498                 .queue_id = queue,
499                 .user_data = user_data,
500                 .burst = attr->postpone,
501         };
502         struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
503         struct mlx5_hw_q_job *job;
504         int ret;
505
506         if (unlikely(!priv->hw_q[queue].job_idx)) {
507                 rte_errno = ENOMEM;
508                 goto error;
509         }
510         job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
511         job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
512         job->user_data = user_data;
513         job->flow = fh;
514         rule_attr.user_data = job;
515         ret = mlx5dr_rule_destroy(&fh->rule, &rule_attr);
516         if (likely(!ret))
517                 return 0;
518         priv->hw_q[queue].job_idx++;
519 error:
520         return rte_flow_error_set(error, rte_errno,
521                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
522                         "fail to create rte flow");
523 }
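
/*
 * Illustrative application-side sketch, not part of the driver: enqueue the
 * destruction of a previously created flow "f" and push it immediately. The
 * completion is reported later by rte_flow_pull() on the same queue.
 *
 *   struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_async_destroy(port_id, 0, &op_attr, f, NULL, &err) == 0)
 *       rte_flow_push(port_id, 0, &err);
 */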
524
525 /**
526  * Pull the enqueued flows.
527  *
528  * For flows enqueued from creation/destruction, the status should be
529  * checked from the dequeue result.
530  *
531  * @param[in] dev
532  *   Pointer to the rte_eth_dev structure.
533  * @param[in] queue
534  *   The queue to pull the result.
535  * @param[in,out] res
536  *   Array to save the results.
537  * @param[in] n_res
538  *   Number of result entries available in the array.
539  * @param[out] error
540  *   Pointer to error structure.
541  *
542  * @return
543  *    Result number on success, negative value otherwise and rte_errno is set.
544  */
545 static int
546 flow_hw_pull(struct rte_eth_dev *dev,
547              uint32_t queue,
548              struct rte_flow_op_result res[],
549              uint16_t n_res,
550              struct rte_flow_error *error)
551 {
552         struct mlx5_priv *priv = dev->data->dev_private;
553         struct mlx5_hw_q_job *job;
554         int ret, i;
555
556         ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
557         if (ret < 0)
558                 return rte_flow_error_set(error, rte_errno,
559                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
560                                 "fail to query flow queue");
561         for (i = 0; i < ret; i++) {
562                 job = (struct mlx5_hw_q_job *)res[i].user_data;
563                 /* Restore user data. */
564                 res[i].user_data = job->user_data;
565                 if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
566                         if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
567                                 flow_hw_jump_release(dev, job->flow->jump);
568                         mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
569                 }
570                 priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
571         }
572         return ret;
573 }
574
575 /**
576  * Push the enqueued flows to HW.
577  *
578  * Force apply all the enqueued flows to the HW.
579  *
580  * @param[in] dev
581  *   Pointer to the rte_eth_dev structure.
582  * @param[in] queue
583  *   The queue to push the flow.
584  * @param[out] error
585  *   Pointer to error structure.
586  *
587  * @return
588  *    0 on success, negative value otherwise and rte_errno is set.
589  */
590 static int
591 flow_hw_push(struct rte_eth_dev *dev,
592              uint32_t queue,
593              struct rte_flow_error *error)
594 {
595         struct mlx5_priv *priv = dev->data->dev_private;
596         int ret;
597
598         ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
599                                        MLX5DR_SEND_QUEUE_ACTION_DRAIN);
600         if (ret) {
601                 rte_flow_error_set(error, rte_errno,
602                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
603                                    "fail to push flows");
604                 return ret;
605         }
606         return 0;
607 }
608
609 /**
610  * Drain the enqueued flows' completion.
611  *
612  * @param[in] dev
613  *   Pointer to the rte_eth_dev structure.
614  * @param[in] queue
615  *   The queue to pull the flow.
616  * @param[in] pending_rules
617  *   The pending flow number.
618  * @param[out] error
619  *   Pointer to error structure.
620  *
621  * @return
622  *    0 on success, negative value otherwise and rte_errno is set.
623  */
624 static int
625 __flow_hw_pull_comp(struct rte_eth_dev *dev,
626                     uint32_t queue,
627                     uint32_t pending_rules,
628                     struct rte_flow_error *error)
629 {
630         struct rte_flow_op_result comp[BURST_THR];
631         int ret, i, empty_loop = 0;
632
633         flow_hw_push(dev, queue, error);
634         while (pending_rules) {
635                 ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
636                 if (ret < 0)
637                         return -1;
638                 if (!ret) {
639                         rte_delay_us_sleep(20000);
640                         if (++empty_loop > 5) {
641                                 DRV_LOG(WARNING, "No available dequeue, quit.");
642                                 break;
643                         }
644                         continue;
645                 }
646                 for (i = 0; i < ret; i++) {
647                         if (comp[i].status == RTE_FLOW_OP_ERROR)
648                                 DRV_LOG(WARNING, "Flow flush get error CQE.");
649                 }
650                 if ((uint32_t)ret > pending_rules) {
651                         DRV_LOG(WARNING, "Flow flush get extra CQE.");
652                         return rte_flow_error_set(error, ERANGE,
653                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
654                                         "get extra CQE");
655                 }
656                 pending_rules -= ret;
657                 empty_loop = 0;
658         }
659         return 0;
660 }
661
662 /**
663  * Flush created flows.
664  *
665  * @param[in] dev
666  *   Pointer to the rte_eth_dev structure.
667  * @param[out] error
668  *   Pointer to error structure.
669  *
670  * @return
671  *    0 on success, negative value otherwise and rte_errno is set.
672  */
673 int
674 flow_hw_q_flow_flush(struct rte_eth_dev *dev,
675                      struct rte_flow_error *error)
676 {
677         struct mlx5_priv *priv = dev->data->dev_private;
678         struct mlx5_hw_q *hw_q;
679         struct rte_flow_template_table *tbl;
680         struct rte_flow_hw *flow;
681         struct rte_flow_op_attr attr = {
682                 .postpone = 0,
683         };
684         uint32_t pending_rules = 0;
685         uint32_t queue;
686         uint32_t fidx;
687
688         /*
689          * Ensure all the enqueued flow creation/destruction jobs
690          * are pushed and dequeued, in case the user forgot to
691          * dequeue. Otherwise the enqueued created flows would be
692          * leaked. The forgotten dequeues would also make the flow
693          * flush receive unexpected extra CQEs and drive
694          * pending_rules to a negative value.
695          */
696         for (queue = 0; queue < priv->nb_queue; queue++) {
697                 hw_q = &priv->hw_q[queue];
698                 if (__flow_hw_pull_comp(dev, queue, hw_q->size - hw_q->job_idx,
699                                         error))
700                         return -1;
701         }
702         /* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
703         hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
704         LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
705                 MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
706                         if (flow_hw_async_flow_destroy(dev,
707                                                 MLX5_DEFAULT_FLUSH_QUEUE,
708                                                 &attr,
709                                                 (struct rte_flow *)flow,
710                                                 NULL,
711                                                 error))
712                                 return -1;
713                         pending_rules++;
714                         /* Drain completion with queue size. */
715                         if (pending_rules >= hw_q->size) {
716                                 if (__flow_hw_pull_comp(dev,
717                                                 MLX5_DEFAULT_FLUSH_QUEUE,
718                                                 pending_rules, error))
719                                         return -1;
720                                 pending_rules = 0;
721                         }
722                 }
723         }
724         /* Drain the remaining completions. */
725         if (pending_rules &&
726             __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, pending_rules,
727                                 error))
728                 return -1;
729         return 0;
730 }
731
732 /**
733  * Create flow table.
734  *
735  * The input item and action templates will be bound to the table.
736  * Flow memory will also be allocated. Matcher will be created based
737  * on the item template. Action will be translated to the dedicated
738  * DR action if possible.
739  *
740  * @param[in] dev
741  *   Pointer to the rte_eth_dev structure.
742  * @param[in] attr
743  *   Pointer to the table attributes.
744  * @param[in] item_templates
745  *   Item template array to be bound to the table.
746  * @param[in] nb_item_templates
747  *   Number of item templates.
748  * @param[in] action_templates
749  *   Action template array to be bound to the table.
750  * @param[in] nb_action_templates
751  *   Number of action templates.
752  * @param[out] error
753  *   Pointer to error structure.
754  *
755  * @return
756  *    Table on success, NULL otherwise and rte_errno is set.
757  */
758 static struct rte_flow_template_table *
759 flow_hw_table_create(struct rte_eth_dev *dev,
760                      const struct rte_flow_template_table_attr *attr,
761                      struct rte_flow_pattern_template *item_templates[],
762                      uint8_t nb_item_templates,
763                      struct rte_flow_actions_template *action_templates[],
764                      uint8_t nb_action_templates,
765                      struct rte_flow_error *error)
766 {
767         struct mlx5_priv *priv = dev->data->dev_private;
768         struct mlx5dr_matcher_attr matcher_attr = {0};
769         struct rte_flow_template_table *tbl = NULL;
770         struct mlx5_flow_group *grp;
771         struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
772         struct rte_flow_attr flow_attr = attr->flow_attr;
773         struct mlx5_flow_cb_ctx ctx = {
774                 .dev = dev,
775                 .error = error,
776                 .data = &flow_attr,
777         };
778         struct mlx5_indexed_pool_config cfg = {
779                 .size = sizeof(struct rte_flow_hw),
780                 .trunk_size = 1 << 12,
781                 .per_core_cache = 1 << 13,
782                 .need_lock = 1,
783                 .release_mem_en = !!priv->sh->config.reclaim_mode,
784                 .malloc = mlx5_malloc,
785                 .free = mlx5_free,
786                 .type = "mlx5_hw_table_flow",
787         };
788         struct mlx5_list_entry *ge;
789         uint32_t i, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
790         uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
791         int err;
792
793         /* HWS layer accepts only 1 item template with root table. */
794         if (!attr->flow_attr.group)
795                 max_tpl = 1;
796         cfg.max_idx = nb_flows;
797         /* For a table with very few flows, disable the per-core cache. */
798         if (nb_flows < cfg.trunk_size) {
799                 cfg.per_core_cache = 0;
800                 cfg.trunk_size = nb_flows;
801         }
802         /* Check if too many templates are requested. */
803         if (nb_item_templates > max_tpl ||
804             nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
805                 rte_errno = EINVAL;
806                 goto error;
807         }
808         /* Allocate the table memory. */
809         tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
810         if (!tbl)
811                 goto error;
812         /* Allocate flow indexed pool. */
813         tbl->flow = mlx5_ipool_create(&cfg);
814         if (!tbl->flow)
815                 goto error;
816         /* Register the flow group. */
817         ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
818         if (!ge)
819                 goto error;
820         grp = container_of(ge, struct mlx5_flow_group, entry);
821         tbl->grp = grp;
822         /* Prepare matcher information. */
823         matcher_attr.priority = attr->flow_attr.priority;
824         matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
825         matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
826         /* Build the item template. */
827         for (i = 0; i < nb_item_templates; i++) {
828                 uint32_t ret;
829
830                 ret = __atomic_add_fetch(&item_templates[i]->refcnt, 1,
831                                          __ATOMIC_RELAXED);
832                 if (ret <= 1) {
833                         rte_errno = EINVAL;
834                         goto it_error;
835                 }
836                 mt[i] = item_templates[i]->mt;
837                 tbl->its[i] = item_templates[i];
838         }
839         tbl->matcher = mlx5dr_matcher_create
840                 (tbl->grp->tbl, mt, nb_item_templates, &matcher_attr);
841         if (!tbl->matcher)
842                 goto it_error;
843         tbl->nb_item_templates = nb_item_templates;
844         /* Build the action template. */
845         for (i = 0; i < nb_action_templates; i++) {
846                 uint32_t ret;
847
848                 ret = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
849                                          __ATOMIC_RELAXED);
850                 if (ret <= 1) {
851                         rte_errno = EINVAL;
852                         goto at_error;
853                 }
854                 LIST_INIT(&tbl->ats[i].acts.act_list);
855                 err = flow_hw_actions_translate(dev, attr,
856                                                 &tbl->ats[i].acts,
857                                                 action_templates[i], error);
858                 if (err) {
859                         i++;
860                         goto at_error;
861                 }
862                 tbl->ats[i].action_template = action_templates[i];
863         }
864         tbl->nb_action_templates = nb_action_templates;
865         tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
866                     (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
867                     MLX5DR_TABLE_TYPE_NIC_RX);
868         LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
869         return tbl;
870 at_error:
871         while (i--) {
872                 __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
873                 __atomic_sub_fetch(&action_templates[i]->refcnt,
874                                    1, __ATOMIC_RELAXED);
875         }
876         i = nb_item_templates;
877 it_error:
878         while (i--)
879                 __atomic_sub_fetch(&item_templates[i]->refcnt,
880                                    1, __ATOMIC_RELAXED);
881         mlx5dr_matcher_destroy(tbl->matcher);
882 error:
883         err = rte_errno;
884         if (tbl) {
885                 if (tbl->grp)
886                         mlx5_hlist_unregister(priv->sh->groups,
887                                               &tbl->grp->entry);
888                 if (tbl->flow)
889                         mlx5_ipool_destroy(tbl->flow);
890                 mlx5_free(tbl);
891         }
892         rte_flow_error_set(error, err,
893                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
894                           "fail to create rte table");
895         return NULL;
896 }
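
/*
 * Illustrative application-side sketch, not part of the driver: create a
 * template table from one pattern template and one actions template. The
 * group, priority and nb_flows values are hypothetical; pattern_tmpl and
 * actions_tmpl are assumed to have been created beforehand.
 *
 *   struct rte_flow_template_table_attr tbl_attr = {
 *       .flow_attr = { .group = 1, .priority = 0, .ingress = 1 },
 *       .nb_flows = 1024,
 *   };
 *   struct rte_flow_template_table *tbl;
 *   struct rte_flow_error err;
 *
 *   tbl = rte_flow_template_table_create(port_id, &tbl_attr,
 *                                        &pattern_tmpl, 1,
 *                                        &actions_tmpl, 1, &err);
 */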
897
898 /**
899  * Destroy flow table.
900  *
901  * @param[in] dev
902  *   Pointer to the rte_eth_dev structure.
903  * @param[in] table
904  *   Pointer to the table to be destroyed.
905  * @param[out] error
906  *   Pointer to error structure.
907  *
908  * @return
909  *   0 on success, a negative errno value otherwise and rte_errno is set.
910  */
911 static int
912 flow_hw_table_destroy(struct rte_eth_dev *dev,
913                       struct rte_flow_template_table *table,
914                       struct rte_flow_error *error)
915 {
916         struct mlx5_priv *priv = dev->data->dev_private;
917         int i;
918
919         if (table->refcnt) {
920                 DRV_LOG(WARNING, "Table %p is still in using.", (void *)table);
921                 return rte_flow_error_set(error, EBUSY,
922                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
923                                    NULL,
924                                    "table is in use");
925         }
926         LIST_REMOVE(table, next);
927         for (i = 0; i < table->nb_item_templates; i++)
928                 __atomic_sub_fetch(&table->its[i]->refcnt,
929                                    1, __ATOMIC_RELAXED);
930         for (i = 0; i < table->nb_action_templates; i++) {
931                 __flow_hw_action_template_destroy(dev, &table->ats[i].acts);
932                 __atomic_sub_fetch(&table->ats[i].action_template->refcnt,
933                                    1, __ATOMIC_RELAXED);
934         }
935         mlx5dr_matcher_destroy(table->matcher);
936         mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
937         mlx5_ipool_destroy(table->flow);
938         mlx5_free(table);
939         return 0;
940 }
941
942 /**
943  * Create flow action template.
944  *
945  * @param[in] dev
946  *   Pointer to the rte_eth_dev structure.
947  * @param[in] attr
948  *   Pointer to the action template attributes.
949  * @param[in] actions
950  *   Associated actions (list terminated by the END action).
951  * @param[in] masks
952  *   List of actions that marks which of the action members are constant.
953  * @param[out] error
954  *   Pointer to error structure.
955  *
956  * @return
957  *   Action template pointer on success, NULL otherwise and rte_errno is set.
958  */
959 static struct rte_flow_actions_template *
960 flow_hw_actions_template_create(struct rte_eth_dev *dev,
961                         const struct rte_flow_actions_template_attr *attr,
962                         const struct rte_flow_action actions[],
963                         const struct rte_flow_action masks[],
964                         struct rte_flow_error *error)
965 {
966         struct mlx5_priv *priv = dev->data->dev_private;
967         int len, act_len, mask_len, i;
968         struct rte_flow_actions_template *at;
969
970         act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
971                                 NULL, 0, actions, error);
972         if (act_len <= 0)
973                 return NULL;
974         len = RTE_ALIGN(act_len, 16);
975         mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
976                                  NULL, 0, masks, error);
977         if (mask_len <= 0)
978                 return NULL;
979         len += RTE_ALIGN(mask_len, 16);
980         at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at), 64, rte_socket_id());
981         if (!at) {
982                 rte_flow_error_set(error, ENOMEM,
983                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
984                                    NULL,
985                                    "cannot allocate action template");
986                 return NULL;
987         }
988         at->attr = *attr;
989         at->actions = (struct rte_flow_action *)(at + 1);
990         act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions, len,
991                                 actions, error);
992         if (act_len <= 0)
993                 goto error;
994         at->masks = (struct rte_flow_action *)
995                     (((uint8_t *)at->actions) + act_len);
996         mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
997                                  len - act_len, masks, error);
998         if (mask_len <= 0)
999                 goto error;
1000         /*
1001          * The mlx5 PMD stores the indirect action index directly in the action conf.
1002          * The rte_flow_conv() function copies the content from conf pointer.
1003          * Need to restore the indirect action index from action conf here.
1004          */
1005         for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
1006              actions++, masks++, i++) {
1007                 if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
1008                         at->actions[i].conf = actions->conf;
1009                         at->masks[i].conf = masks->conf;
1010                 }
1011         }
1012         __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
1013         LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
1014         return at;
1015 error:
1016         mlx5_free(at);
1017         return NULL;
1018 }
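
/*
 * Illustrative application-side sketch, not part of the driver: create an
 * actions template whose JUMP destination group is a per-flow parameter
 * (NULL conf in the mask). The at_attr variable is assumed to be an
 * already prepared struct rte_flow_actions_template_attr.
 *
 *   struct rte_flow_action acts[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_JUMP },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action masks[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_JUMP },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_actions_template *at;
 *   struct rte_flow_error err;
 *
 *   at = rte_flow_actions_template_create(port_id, &at_attr, acts, masks,
 *                                         &err);
 */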
1019
1020 /**
1021  * Destroy flow action template.
1022  *
1023  * @param[in] dev
1024  *   Pointer to the rte_eth_dev structure.
1025  * @param[in] template
1026  *   Pointer to the action template to be destroyed.
1027  * @param[out] error
1028  *   Pointer to error structure.
1029  *
1030  * @return
1031  *   0 on success, a negative errno value otherwise and rte_errno is set.
1032  */
1033 static int
1034 flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
1035                                  struct rte_flow_actions_template *template,
1036                                  struct rte_flow_error *error __rte_unused)
1037 {
1038         if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
1039                 DRV_LOG(WARNING, "Action template %p is still in use.",
1040                         (void *)template);
1041                 return rte_flow_error_set(error, EBUSY,
1042                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1043                                    NULL,
1044                                    "action template is in use");
1045         }
1046         LIST_REMOVE(template, next);
1047         mlx5_free(template);
1048         return 0;
1049 }
1050
1051 /**
1052  * Create flow item template.
1053  *
1054  * @param[in] dev
1055  *   Pointer to the rte_eth_dev structure.
1056  * @param[in] attr
1057  *   Pointer to the item template attributes.
1058  * @param[in] items
1059  *   The template item pattern.
1060  * @param[out] error
1061  *   Pointer to error structure.
1062  *
1063  * @return
1064  *  Item template pointer on success, NULL otherwise and rte_errno is set.
1065  */
1066 static struct rte_flow_pattern_template *
1067 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
1068                              const struct rte_flow_pattern_template_attr *attr,
1069                              const struct rte_flow_item items[],
1070                              struct rte_flow_error *error)
1071 {
1072         struct mlx5_priv *priv = dev->data->dev_private;
1073         struct rte_flow_pattern_template *it;
1074
1075         it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
1076         if (!it) {
1077                 rte_flow_error_set(error, ENOMEM,
1078                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1079                                    NULL,
1080                                    "cannot allocate item template");
1081                 return NULL;
1082         }
1083         it->attr = *attr;
1084         it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
1085         if (!it->mt) {
1086                 mlx5_free(it);
1087                 rte_flow_error_set(error, rte_errno,
1088                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1089                                    NULL,
1090                                    "cannot create match template");
1091                 return NULL;
1092         }
1093         __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
1094         LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
1095         return it;
1096 }
1097
1098 /**
1099  * Destroy flow item template.
1100  *
1101  * @param[in] dev
1102  *   Pointer to the rte_eth_dev structure.
1103  * @param[in] template
1104  *   Pointer to the item template to be destroyed.
1105  * @param[out] error
1106  *   Pointer to error structure.
1107  *
1108  * @return
1109  *   0 on success, a negative errno value otherwise and rte_errno is set.
1110  */
1111 static int
1112 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
1113                               struct rte_flow_pattern_template *template,
1114                               struct rte_flow_error *error __rte_unused)
1115 {
1116         if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
1117                 DRV_LOG(WARNING, "Item template %p is still in use.",
1118                         (void *)template);
1119                 return rte_flow_error_set(error, EBUSY,
1120                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1121                                    NULL,
1122                                    "item template is in use");
1123         }
1124         LIST_REMOVE(template, next);
1125         claim_zero(mlx5dr_match_template_destroy(template->mt));
1126         mlx5_free(template);
1127         return 0;
1128 }
1129
1130 /**
1131  * Get information about HWS pre-configurable resources.
1132  *
1133  * @param[in] dev
1134  *   Pointer to the rte_eth_dev structure.
1135  * @param[out] port_info
1136  *   Pointer to port information.
1137  * @param[out] queue_info
1138  *   Pointer to queue information.
1139  * @param[out] error
1140  *   Pointer to error structure.
1141  *
1142  * @return
1143  *   0 on success, a negative errno value otherwise and rte_errno is set.
1144  */
1145 static int
1146 flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
1147                  struct rte_flow_port_info *port_info,
1148                  struct rte_flow_queue_info *queue_info,
1149                  struct rte_flow_error *error __rte_unused)
1150 {
1151         /* Nothing to be updated currently. */
1152         memset(port_info, 0, sizeof(*port_info));
1153         /* Queue size is not limited by the low-level layer. */
1154         queue_info->max_size = UINT32_MAX;
1155         return 0;
1156 }
1157
1158 /**
1159  * Create group callback.
1160  *
1161  * @param[in] tool_ctx
1162  *   Pointer to the hash list related context.
1163  * @param[in] cb_ctx
1164  *   Pointer to the group creation context.
1165  *
1166  * @return
1167  *   Group entry on success, NULL otherwise and rte_errno is set.
1168  */
1169 struct mlx5_list_entry *
1170 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
1171 {
1172         struct mlx5_dev_ctx_shared *sh = tool_ctx;
1173         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
1174         struct rte_eth_dev *dev = ctx->dev;
1175         struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
1176         struct mlx5_priv *priv = dev->data->dev_private;
1177         struct mlx5dr_table_attr dr_tbl_attr = {0};
1178         struct rte_flow_error *error = ctx->error;
1179         struct mlx5_flow_group *grp_data;
1180         struct mlx5dr_table *tbl = NULL;
1181         struct mlx5dr_action *jump;
1182         uint32_t idx = 0;
1183
1184         grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
1185         if (!grp_data) {
1186                 rte_flow_error_set(error, ENOMEM,
1187                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1188                                    NULL,
1189                                    "cannot allocate flow table data entry");
1190                 return NULL;
1191         }
1192         dr_tbl_attr.level = attr->group;
1193         if (attr->transfer)
1194                 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
1195         else if (attr->egress)
1196                 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
1197         else
1198                 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
1199         tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
1200         if (!tbl)
1201                 goto error;
1202         grp_data->tbl = tbl;
1203         if (attr->group) {
1204                 /* Jump action to be used by a non-root table. */
1205                 jump = mlx5dr_action_create_dest_table
1206                         (priv->dr_ctx, tbl,
1207                          mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
1208                 if (!jump)
1209                         goto error;
1210                 grp_data->jump.hws_action = jump;
1211                 /* Jump action to be used by the root table. */
1212                 jump = mlx5dr_action_create_dest_table
1213                         (priv->dr_ctx, tbl,
1214                          mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
1215                                          [dr_tbl_attr.type]);
1216                 if (!jump)
1217                         goto error;
1218                 grp_data->jump.root_action = jump;
1219         }
1220         grp_data->idx = idx;
1221         grp_data->group_id = attr->group;
1222         grp_data->type = dr_tbl_attr.type;
1223         return &grp_data->entry;
1224 error:
1225         if (grp_data->jump.root_action)
1226                 mlx5dr_action_destroy(grp_data->jump.root_action);
1227         if (grp_data->jump.hws_action)
1228                 mlx5dr_action_destroy(grp_data->jump.hws_action);
1229         if (tbl)
1230                 mlx5dr_table_destroy(tbl);
1231         if (idx)
1232                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
1233         rte_flow_error_set(error, ENOMEM,
1234                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1235                            NULL,
1236                            "cannot allocate flow dr table");
1237         return NULL;
1238 }
1239
1240 /**
1241  * Remove group callback.
1242  *
1243  * @param[in] tool_ctx
1244  *   Pointer to the hash list related context.
1245  * @param[in] entry
1246  *   Pointer to the entry to be removed.
1247  */
1248 void
1249 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
1250 {
1251         struct mlx5_dev_ctx_shared *sh = tool_ctx;
1252         struct mlx5_flow_group *grp_data =
1253                     container_of(entry, struct mlx5_flow_group, entry);
1254
1255         MLX5_ASSERT(entry && sh);
1256         /* To use the wrapper glue functions instead. */
1257         if (grp_data->jump.hws_action)
1258                 mlx5dr_action_destroy(grp_data->jump.hws_action);
1259         if (grp_data->jump.root_action)
1260                 mlx5dr_action_destroy(grp_data->jump.root_action);
1261         mlx5dr_table_destroy(grp_data->tbl);
1262         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
1263 }
1264
1265 /**
1266  * Match group callback.
1267  *
1268  * @param[in] tool_ctx
1269  *   Pointer to the hash list related context.
1270  * @param[in] entry
1271  *   Pointer to the group to be matched.
1272  * @param[in] cb_ctx
1273  *   Pointer to the group matching context.
1274  *
1275  * @return
1276  *   0 on match, non-zero on mismatch.
1277  */
1278 int
1279 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
1280                      void *cb_ctx)
1281 {
1282         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
1283         struct mlx5_flow_group *grp_data =
1284                 container_of(entry, struct mlx5_flow_group, entry);
1285         struct rte_flow_attr *attr =
1286                         (struct rte_flow_attr *)ctx->data;
1287
1288         return (grp_data->group_id != attr->group) ||
1289                 ((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
1290                 attr->transfer) ||
1291                 ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
1292                 attr->egress) ||
1293                 ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
1294                 attr->ingress);
1295 }
1296
1297 /**
1298  * Clone group entry callback.
1299  *
1300  * @param[in] tool_ctx
1301  *   Pointer to the hash list related context.
1302  * @param[in] oentry
1303  *   Pointer to the group entry to be cloned.
1304  * @param[in] cb_ctx
1305  *   Pointer to the group clone context.
1306  *
1307  * @return
1308  *   Cloned group entry on success, NULL otherwise and rte_errno is set.
1309  */
1310 struct mlx5_list_entry *
1311 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
1312                      void *cb_ctx)
1313 {
1314         struct mlx5_dev_ctx_shared *sh = tool_ctx;
1315         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
1316         struct mlx5_flow_group *grp_data;
1317         struct rte_flow_error *error = ctx->error;
1318         uint32_t idx = 0;
1319
1320         grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
1321         if (!grp_data) {
1322                 rte_flow_error_set(error, ENOMEM,
1323                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1324                                    NULL,
1325                                    "cannot allocate flow table data entry");
1326                 return NULL;
1327         }
1328         memcpy(grp_data, oentry, sizeof(*grp_data));
1329         grp_data->idx = idx;
1330         return &grp_data->entry;
1331 }
1332
1333 /**
1334  * Free cloned group entry callback.
1335  *
1336  * @param[in] tool_ctx
1337  *   Pointer to the hash list related context.
1338  * @param[in] entry
1339  *   Pointer to the group to be freed.
1340  */
1341 void
1342 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
1343 {
1344         struct mlx5_dev_ctx_shared *sh = tool_ctx;
1345         struct mlx5_flow_group *grp_data =
1346                     container_of(entry, struct mlx5_flow_group, entry);
1347
1348         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
1349 }
1350
1351 /**
1352  * Configure port HWS resources.
1353  *
1354  * @param[in] dev
1355  *   Pointer to the rte_eth_dev structure.
1356  * @param[in] port_attr
1357  *   Port configuration attributes.
1358  * @param[in] nb_queue
1359  *   Number of queues.
1360  * @param[in] queue_attr
1361  *   Array that holds attributes for each flow queue.
1362  * @param[out] error
1363  *   Pointer to error structure.
1364  *
1365  * @return
1366  *   0 on success, a negative errno value otherwise and rte_errno is set.
1367  */
1369 static int
1370 flow_hw_configure(struct rte_eth_dev *dev,
1371                   const struct rte_flow_port_attr *port_attr,
1372                   uint16_t nb_queue,
1373                   const struct rte_flow_queue_attr *queue_attr[],
1374                   struct rte_flow_error *error)
1375 {
1376         struct mlx5_priv *priv = dev->data->dev_private;
1377         struct mlx5dr_context *dr_ctx = NULL;
1378         struct mlx5dr_context_attr dr_ctx_attr = {0};
1379         struct mlx5_hw_q *hw_q;
1380         struct mlx5_hw_q_job *job = NULL;
1381         uint32_t mem_size, i, j;
1382         struct mlx5_indexed_pool_config cfg = {
1383                 .size = sizeof(struct rte_flow_hw),
1384                 .trunk_size = 4096,
1385                 .need_lock = 1,
1386                 .release_mem_en = !!priv->sh->config.reclaim_mode,
1387                 .malloc = mlx5_malloc,
1388                 .free = mlx5_free,
1389                 .type = "mlx5_hw_action_construct_data",
1390         };
1391
1392         if (!port_attr || !nb_queue || !queue_attr) {
1393                 rte_errno = EINVAL;
1394                 goto err;
1395         }
1396         /* In case of reconfiguration, release the existing context first. */
1397         if (priv->dr_ctx) {
1399                 for (i = 0; i < nb_queue; i++) {
1400                         hw_q = &priv->hw_q[i];
1401                         /* Make sure all queues are empty. */
1402                         if (hw_q->size != hw_q->job_idx) {
1403                                 rte_errno = EBUSY;
1404                                 goto err;
1405                         }
1406                 }
1407                 flow_hw_resource_release(dev);
1408         }
1409         priv->acts_ipool = mlx5_ipool_create(&cfg);
1410         if (!priv->acts_ipool)
1411                 goto err;
1412         /* Allocate the queue job descriptor LIFO. */
1413         mem_size = sizeof(priv->hw_q[0]) * nb_queue;
1414         for (i = 0; i < nb_queue; i++) {
1415                 /*
1416                  * Check that all queue sizes are the same, which is
1417                  * a limitation of the HWS layer.
1418                  */
1419                 if (queue_attr[i]->size != queue_attr[0]->size) {
1420                         rte_errno = EINVAL;
1421                         goto err;
1422                 }
1423                 mem_size += (sizeof(struct mlx5_hw_q_job *) +
1424                             sizeof(struct mlx5_hw_q_job)) *
1425                             queue_attr[0]->size;
1426         }
1427         priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
1428                                  64, SOCKET_ID_ANY);
1429         if (!priv->hw_q) {
1430                 rte_errno = ENOMEM;
1431                 goto err;
1432         }
1433         for (i = 0; i < nb_queue; i++) {
1434                 priv->hw_q[i].job_idx = queue_attr[i]->size;
1435                 priv->hw_q[i].size = queue_attr[i]->size;
1436                 if (i == 0)
1437                         priv->hw_q[i].job = (struct mlx5_hw_q_job **)
1438                                             &priv->hw_q[nb_queue];
1439                 else
1440                         priv->hw_q[i].job = (struct mlx5_hw_q_job **)
1441                                             &job[queue_attr[i - 1]->size];
1442                 job = (struct mlx5_hw_q_job *)
1443                       &priv->hw_q[i].job[queue_attr[i]->size];
1444                 for (j = 0; j < queue_attr[i]->size; j++)
1445                         priv->hw_q[i].job[j] = &job[j];
1446         }
1447         dr_ctx_attr.pd = priv->sh->cdev->pd;
1448         dr_ctx_attr.queues = nb_queue;
1449         /* Queue sizes must all be the same. Take the first one. */
1450         dr_ctx_attr.queue_size = queue_attr[0]->size;
1451         dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
1452         /* rte_errno has been updated by HWS layer. */
1453         if (!dr_ctx)
1454                 goto err;
1455         priv->dr_ctx = dr_ctx;
1456         priv->nb_queue = nb_queue;
1457         /* Add global actions. */
1458         for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1459                 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1460                         priv->hw_drop[i][j] = mlx5dr_action_create_dest_drop
1461                                 (priv->dr_ctx, mlx5_hw_act_flag[i][j]);
1462                         if (!priv->hw_drop[i][j])
1463                                 goto err;
1464                 }
1465         }
1466         return 0;
1467 err:
1468         for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1469                 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1470                         if (!priv->hw_drop[i][j])
1471                                 continue;
1472                         mlx5dr_action_destroy(priv->hw_drop[i][j]);
1473                 }
1474         }
1475         if (dr_ctx)
1476                 claim_zero(mlx5dr_context_close(dr_ctx));
1477         mlx5_free(priv->hw_q);
1478         priv->hw_q = NULL;
1479         if (priv->acts_ipool) {
1480                 mlx5_ipool_destroy(priv->acts_ipool);
1481                 priv->acts_ipool = NULL;
1482         }
1483         return rte_flow_error_set(error, rte_errno,
1484                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1485                                   "fail to configure port");
1486 }
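
/*
 * Illustrative application-side sketch, not part of the driver: configure a
 * port with two flow queues of 64 entries each before creating templates
 * and tables. The queue count and size are hypothetical.
 *
 *   struct rte_flow_port_attr port_attr = { 0 };
 *   struct rte_flow_queue_attr q_attr = { .size = 64 };
 *   const struct rte_flow_queue_attr *q_attrs[] = { &q_attr, &q_attr };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_configure(port_id, &port_attr, 2, q_attrs, &err) != 0)
 *       rte_panic("flow configure failed: %s\n", err.message);
 */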
1487
1488 /**
1489  * Release HWS resources.
1490  *
1491  * @param[in] dev
1492  *   Pointer to the rte_eth_dev structure.
1493  */
1494 void
1495 flow_hw_resource_release(struct rte_eth_dev *dev)
1496 {
1497         struct mlx5_priv *priv = dev->data->dev_private;
1498         struct rte_flow_template_table *tbl;
1499         struct rte_flow_pattern_template *it;
1500         struct rte_flow_actions_template *at;
1501         int i, j;
1502
1503         if (!priv->dr_ctx)
1504                 return;
1505         while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
1506                 tbl = LIST_FIRST(&priv->flow_hw_tbl);
1507                 flow_hw_table_destroy(dev, tbl, NULL);
1508         }
1509         while (!LIST_EMPTY(&priv->flow_hw_itt)) {
1510                 it = LIST_FIRST(&priv->flow_hw_itt);
1511                 flow_hw_pattern_template_destroy(dev, it, NULL);
1512         }
1513         while (!LIST_EMPTY(&priv->flow_hw_at)) {
1514                 at = LIST_FIRST(&priv->flow_hw_at);
1515                 flow_hw_actions_template_destroy(dev, at, NULL);
1516         }
1517         for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1518                 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1519                         if (!priv->hw_drop[i][j])
1520                                 continue;
1521                         mlx5dr_action_destroy(priv->hw_drop[i][j]);
1522                 }
1523         }
1524         if (priv->acts_ipool) {
1525                 mlx5_ipool_destroy(priv->acts_ipool);
1526                 priv->acts_ipool = NULL;
1527         }
1528         mlx5_free(priv->hw_q);
1529         priv->hw_q = NULL;
1530         claim_zero(mlx5dr_context_close(priv->dr_ctx));
1531         priv->dr_ctx = NULL;
1532         priv->nb_queue = 0;
1533 }
1534
1535 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
1536         .info_get = flow_hw_info_get,
1537         .configure = flow_hw_configure,
1538         .pattern_template_create = flow_hw_pattern_template_create,
1539         .pattern_template_destroy = flow_hw_pattern_template_destroy,
1540         .actions_template_create = flow_hw_actions_template_create,
1541         .actions_template_destroy = flow_hw_actions_template_destroy,
1542         .template_table_create = flow_hw_table_create,
1543         .template_table_destroy = flow_hw_table_destroy,
1544         .async_flow_create = flow_hw_async_flow_create,
1545         .async_flow_destroy = flow_hw_async_flow_destroy,
1546         .pull = flow_hw_pull,
1547         .push = flow_hw_push,
1548 };
1549
1550 #endif