net/mlx5: add HW mark action
drivers/net/mlx5/mlx5_flow_hw.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4
5 #include <rte_flow.h>
6
7 #include <mlx5_malloc.h>
8 #include "mlx5_defs.h"
9 #include "mlx5_flow.h"
10 #include "mlx5_rx.h"
11
12 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
13
14 /* The maximum number of actions supported in a flow. */
15 #define MLX5_HW_MAX_ACTS 16
16
17 /* Default push burst threshold. */
18 #define BURST_THR 32u
19
20 /* Default queue to flush the flows. */
21 #define MLX5_DEFAULT_FLUSH_QUEUE 0
22
23 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
24
25 /*
 * DR action flags used when creating DR actions. The first index is the
 * table mode (0 - root table, 1 - HWS table, usually taken as !!group),
 * the second index is the mlx5dr table type (NIC Rx, NIC Tx or FDB).
 */
26 static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
27                                 [MLX5DR_TABLE_TYPE_MAX] = {
28         {
29                 MLX5DR_ACTION_FLAG_ROOT_RX,
30                 MLX5DR_ACTION_FLAG_ROOT_TX,
31                 MLX5DR_ACTION_FLAG_ROOT_FDB,
32         },
33         {
34                 MLX5DR_ACTION_FLAG_HWS_RX,
35                 MLX5DR_ACTION_FLAG_HWS_TX,
36                 MLX5DR_ACTION_FLAG_HWS_FDB,
37         },
38 };
39
40 /**
41  * Enable or disable the mark flag on all Rx queues.
42  *
43  * @param[in] dev
44  *   Pointer to the rte_eth_dev structure.
45  * @param[in] enable
46  *   True to set the flag, false to clear it.
47  */
48 static void
49 flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)
50 {
51         struct mlx5_priv *priv = dev->data->dev_private;
52         unsigned int i;
53
54         if ((!priv->mark_enabled && !enable) ||
55             (priv->mark_enabled && enable))
56                 return;
57         for (i = 0; i < priv->rxqs_n; ++i) {
58                 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
59
60                 rxq_ctrl->rxq.mark = enable;
61         }
62         priv->mark_enabled = enable;
63 }
64
65 /**
66  * Register destination table DR jump action.
67  *
68  * @param[in] dev
69  *   Pointer to the rte_eth_dev structure.
70  * @param[in] attr
71  *   Pointer to the flow attributes.
72  * @param[in] dest_group
73  *   The destination group ID.
74  * @param[out] error
75  *   Pointer to error structure.
76  *
77  * @return
78  *    Jump action on success, NULL otherwise and rte_errno is set.
79  */
80 static struct mlx5_hw_jump_action *
81 flow_hw_jump_action_register(struct rte_eth_dev *dev,
82                              const struct rte_flow_attr *attr,
83                              uint32_t dest_group,
84                              struct rte_flow_error *error)
85 {
86         struct mlx5_priv *priv = dev->data->dev_private;
87         struct rte_flow_attr jattr = *attr;
88         struct mlx5_flow_group *grp;
89         struct mlx5_flow_cb_ctx ctx = {
90                 .dev = dev,
91                 .error = error,
92                 .data = &jattr,
93         };
94         struct mlx5_list_entry *ge;
95
96         jattr.group = dest_group;
97         ge = mlx5_hlist_register(priv->sh->flow_tbls, dest_group, &ctx);
98         if (!ge)
99                 return NULL;
100         grp = container_of(ge, struct mlx5_flow_group, entry);
101         return &grp->jump;
102 }
103
104 /**
105  * Release jump action.
106  *
107  * @param[in] dev
108  *   Pointer to the rte_eth_dev structure.
109  * @param[in] jump
110  *   Pointer to the jump action.
111  */
112
113 static void
114 flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
115 {
116         struct mlx5_priv *priv = dev->data->dev_private;
117         struct mlx5_flow_group *grp;
118
119         grp = container_of
120                 (jump, struct mlx5_flow_group, jump);
121         mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
122 }
123
124 /**
125  * Register queue/RSS action.
126  *
127  * @param[in] dev
128  *   Pointer to the rte_eth_dev structure.
129  * @param[in] hws_flags
130  *   DR action flags.
131  * @param[in] action
132  *   Pointer to the rte_flow QUEUE or RSS action.
133  *
134  * @return
135  *    Rx hash queue (TIR) object on success, NULL otherwise and rte_errno is set.
136  */
137 static inline struct mlx5_hrxq*
138 flow_hw_tir_action_register(struct rte_eth_dev *dev,
139                             uint32_t hws_flags,
140                             const struct rte_flow_action *action)
141 {
142         struct mlx5_flow_rss_desc rss_desc = {
143                 .hws_flags = hws_flags,
144         };
145         struct mlx5_hrxq *hrxq;
146
147         if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
148                 const struct rte_flow_action_queue *queue = action->conf;
149
150                 rss_desc.const_q = &queue->index;
151                 rss_desc.queue_num = 1;
152         } else {
153                 const struct rte_flow_action_rss *rss = action->conf;
154
155                 rss_desc.queue_num = rss->queue_num;
156                 rss_desc.const_q = rss->queue;
157                 memcpy(rss_desc.key,
158                        !rss->key ? rss_hash_default_key : rss->key,
159                        MLX5_RSS_HASH_KEY_LEN);
160                 rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
161                 rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
162                 flow_dv_hashfields_set(0, &rss_desc, &rss_desc.hash_fields);
163                 flow_dv_action_rss_l34_hash_adjust(rss->types,
164                                                    &rss_desc.hash_fields);
165                 if (rss->level > 1) {
166                         rss_desc.hash_fields |= IBV_RX_HASH_INNER;
167                         rss_desc.tunnel = 1;
168                 }
169         }
170         hrxq = mlx5_hrxq_get(dev, &rss_desc);
171         return hrxq;
172 }
173
174 /**
175  * Destroy DR actions created by action template.
176  *
177  * DR actions created during the table creation's action translation need
178  * to be destroyed when the table itself is destroyed.
179  *
180  * @param[in] dev
181  *   Pointer to the rte_eth_dev structure.
182  * @param[in] acts
183  *   Pointer to the template HW steering DR actions.
184  */
185 static void
186 __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
187                                  struct mlx5_hw_actions *acts)
188 {
189         struct mlx5_priv *priv = dev->data->dev_private;
190
191         if (acts->jump) {
192                 struct mlx5_flow_group *grp;
193
194                 grp = container_of
195                         (acts->jump, struct mlx5_flow_group, jump);
196                 mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
197                 acts->jump = NULL;
198         }
199 }
200
201 /**
202  * Allocate a dynamic action construct data entry.
203  *
204  * @param[in] priv
205  *   Pointer to the port private data structure.
208  * @param[in] type
209  *   Action type.
210  * @param[in] action_src
211  *   Offset of source rte flow action.
212  * @param[in] action_dst
213  *   Offset of destination DR action.
214  *
215  * @return
216  *    Pointer to the allocated action construct data on success, NULL otherwise.
217  */
218 static __rte_always_inline struct mlx5_action_construct_data *
219 __flow_hw_act_data_alloc(struct mlx5_priv *priv,
220                          enum rte_flow_action_type type,
221                          uint16_t action_src,
222                          uint16_t action_dst)
223 {
224         struct mlx5_action_construct_data *act_data;
225         uint32_t idx = 0;
226
227         act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
228         if (!act_data)
229                 return NULL;
230         act_data->idx = idx;
231         act_data->type = type;
232         act_data->action_src = action_src;
233         act_data->action_dst = action_dst;
234         return act_data;
235 }
236
237 /**
238  * Append dynamic action to the dynamic action list.
239  *
240  * @param[in] priv
241  *   Pointer to the port private data structure.
242  * @param[in] acts
243  *   Pointer to the template HW steering DR actions.
244  * @param[in] type
245  *   Action type.
246  * @param[in] action_src
247  *   Offset of source rte flow action.
248  * @param[in] action_dst
249  *   Offset of destination DR action.
250  *
251  * @return
252  *    0 on success, negative value otherwise and rte_errno is set.
253  */
254 static __rte_always_inline int
255 __flow_hw_act_data_general_append(struct mlx5_priv *priv,
256                                   struct mlx5_hw_actions *acts,
257                                   enum rte_flow_action_type type,
258                                   uint16_t action_src,
259                                   uint16_t action_dst)
260 {
        struct mlx5_action_construct_data *act_data;
261
262         act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
263         if (!act_data)
264                 return -1;
265         LIST_INSERT_HEAD(&acts->act_list, act_data, next);
266         return 0;
267 }
268
269 /**
270  * Translate rte_flow actions to DR action.
271  *
272  * As the action template has already indicated the actions, translate the
273  * rte_flow actions to DR actions if possible, so that cycles are saved
274  * from organizing the actions at flow creation time. Actions with only
275  * partial information (no mask configuration in the template) are
276  * appended to a dynamic list and constructed at flow creation.
277  *
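 * For instance (an illustrative sketch only), with an actions template
 * such as:
 *
 *     actions = { MARK(id = 1), QUEUE(index = 0), END }
 *     masks   = { MARK(id = 0xffffffff), QUEUE(conf = NULL), END }
 *
 * the fully masked MARK value from the template is compiled into the DR
 * tag action here, while the QUEUE action has no mask configuration and
 * is therefore deferred to flow_hw_actions_construct() at flow creation.
 *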
278  * @param[in] dev
279  *   Pointer to the rte_eth_dev structure.
280  * @param[in] table_attr
281  *   Pointer to the table attributes.
284  * @param[in/out] acts
285  *   Pointer to the template HW steering DR actions.
286  * @param[in] at
287  *   Action template.
288  * @param[out] error
289  *   Pointer to error structure.
290  *
291  * @return
292  *    0 on success, a negative value otherwise and rte_errno is set.
293  */
294 static int
295 flow_hw_actions_translate(struct rte_eth_dev *dev,
296                           const struct rte_flow_template_table_attr *table_attr,
297                           struct mlx5_hw_actions *acts,
298                           struct rte_flow_actions_template *at,
299                           struct rte_flow_error *error)
300 {
301         struct mlx5_priv *priv = dev->data->dev_private;
302         const struct rte_flow_attr *attr = &table_attr->flow_attr;
303         struct rte_flow_action *actions = at->actions;
304         struct rte_flow_action *action_start = actions;
305         struct rte_flow_action *masks = at->masks;
306         bool actions_end = false;
307         uint32_t type, i;
308         int err;
309
310         if (attr->transfer)
311                 type = MLX5DR_TABLE_TYPE_FDB;
312         else if (attr->egress)
313                 type = MLX5DR_TABLE_TYPE_NIC_TX;
314         else
315                 type = MLX5DR_TABLE_TYPE_NIC_RX;
316         for (i = 0; !actions_end; actions++, masks++) {
317                 switch (actions->type) {
318                 case RTE_FLOW_ACTION_TYPE_INDIRECT:
319                         break;
320                 case RTE_FLOW_ACTION_TYPE_VOID:
321                         break;
322                 case RTE_FLOW_ACTION_TYPE_DROP:
323                         acts->rule_acts[i++].action =
324                                 priv->hw_drop[!!attr->group][type];
325                         break;
326                 case RTE_FLOW_ACTION_TYPE_MARK:
327                         acts->mark = true;
328                         if (masks->conf)
329                                 acts->rule_acts[i].tag.value =
330                                         mlx5_flow_mark_set
331                                         (((const struct rte_flow_action_mark *)
332                                         (actions->conf))->id);
333                         else if (__flow_hw_act_data_general_append(priv, acts,
334                                 actions->type, actions - action_start, i))
335                                 goto err;
336                         acts->rule_acts[i++].action =
337                                 priv->hw_tag[!!attr->group];
338                         flow_hw_rxq_flag_set(dev, true);
339                         break;
340                 case RTE_FLOW_ACTION_TYPE_JUMP:
341                         if (masks->conf) {
342                                 uint32_t jump_group =
343                                         ((const struct rte_flow_action_jump *)
344                                         actions->conf)->group;
345                                 acts->jump = flow_hw_jump_action_register
346                                                 (dev, attr, jump_group, error);
347                                 if (!acts->jump)
348                                         goto err;
349                                 acts->rule_acts[i].action = (!!attr->group) ?
350                                                 acts->jump->hws_action :
351                                                 acts->jump->root_action;
352                         } else if (__flow_hw_act_data_general_append
353                                         (priv, acts, actions->type,
354                                          actions - action_start, i)){
355                                 goto err;
356                         }
357                         i++;
358                         break;
359                 case RTE_FLOW_ACTION_TYPE_QUEUE:
360                         if (masks->conf) {
361                                 acts->tir = flow_hw_tir_action_register
362                                 (dev,
363                                  mlx5_hw_act_flag[!!attr->group][type],
364                                  actions);
365                                 if (!acts->tir)
366                                         goto err;
367                                 acts->rule_acts[i].action =
368                                         acts->tir->action;
369                         } else if (__flow_hw_act_data_general_append
370                                         (priv, acts, actions->type,
371                                          actions - action_start, i)) {
372                                 goto err;
373                         }
374                         i++;
375                         break;
376                 case RTE_FLOW_ACTION_TYPE_RSS:
377                         if (masks->conf) {
378                                 acts->tir = flow_hw_tir_action_register
379                                 (dev,
380                                  mlx5_hw_act_flag[!!attr->group][type],
381                                  actions);
382                                 if (!acts->tir)
383                                         goto err;
384                                 acts->rule_acts[i].action =
385                                         acts->tir->action;
386                         } else if (__flow_hw_act_data_general_append
387                                         (priv, acts, actions->type,
388                                          actions - action_start, i)) {
389                                 goto err;
390                         }
391                         i++;
392                         break;
393                 case RTE_FLOW_ACTION_TYPE_END:
394                         actions_end = true;
395                         break;
396                 default:
397                         break;
398                 }
399         }
400         acts->acts_num = i;
401         return 0;
402 err:
403         err = rte_errno;
404         __flow_hw_action_template_destroy(dev, acts);
405         return rte_flow_error_set(error, err,
406                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
407                                   "fail to translate actions");
408 }
409
410 /**
411  * Construct flow action array.
412  *
413  * For action templates that contain dynamic actions, these actions need
414  * to be updated with the rte_flow action values during flow creation.
415  *
416  * @param[in] dev
417  *   Pointer to the rte_eth_dev structure.
418  * @param[in] job
419  *   Pointer to job descriptor.
420  * @param[in] hw_acts
421  *   Pointer to translated actions from template.
422  * @param[in] actions
423  *   Array of rte_flow actions to be checked.
424  * @param[in/out] rule_acts
425  *   Array of DR rule actions to be used during flow creation.
426  * @param[out] acts_num
427  *   Pointer to the number of actions actually used by the flow.
428  *
429  * @return
430  *    0 on success, negative value otherwise and rte_errno is set.
431  */
432 static __rte_always_inline int
433 flow_hw_actions_construct(struct rte_eth_dev *dev,
434                           struct mlx5_hw_q_job *job,
435                           struct mlx5_hw_actions *hw_acts,
436                           const struct rte_flow_action actions[],
437                           struct mlx5dr_rule_action *rule_acts,
438                           uint32_t *acts_num)
439 {
440         struct rte_flow_template_table *table = job->flow->table;
441         struct mlx5_action_construct_data *act_data;
442         const struct rte_flow_action *action;
443         struct rte_flow_attr attr = {
444                         .ingress = 1,
445         };
446         uint32_t ft_flag;
447
448         memcpy(rule_acts, hw_acts->rule_acts,
449                sizeof(*rule_acts) * hw_acts->acts_num);
450         *acts_num = hw_acts->acts_num;
451         if (LIST_EMPTY(&hw_acts->act_list))
452                 return 0;
453         attr.group = table->grp->group_id;
454         ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
455         if (table->type == MLX5DR_TABLE_TYPE_FDB) {
456                 attr.transfer = 1;
457                 attr.ingress = 1;
458         } else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
459                 attr.egress = 1;
460                 attr.ingress = 0;
461         } else {
462                 attr.ingress = 1;
463         }
464         LIST_FOREACH(act_data, &hw_acts->act_list, next) {
465                 uint32_t jump_group;
466                 uint32_t tag;
467                 struct mlx5_hw_jump_action *jump;
468                 struct mlx5_hrxq *hrxq;
469
470                 action = &actions[act_data->action_src];
471                 MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||
472                             (int)action->type == act_data->type);
473                 switch (action->type) {
474                 case RTE_FLOW_ACTION_TYPE_INDIRECT:
475                         break;
476                 case RTE_FLOW_ACTION_TYPE_VOID:
477                         break;
478                 case RTE_FLOW_ACTION_TYPE_MARK:
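                        /* Encode the per-flow MARK id into the HW tag value. */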
479                         tag = mlx5_flow_mark_set
480                               (((const struct rte_flow_action_mark *)
481                               (action->conf))->id);
482                         rule_acts[act_data->action_dst].tag.value = tag;
483                         break;
484                 case RTE_FLOW_ACTION_TYPE_JUMP:
485                         jump_group = ((const struct rte_flow_action_jump *)
486                                                 action->conf)->group;
487                         jump = flow_hw_jump_action_register
488                                 (dev, &attr, jump_group, NULL);
489                         if (!jump)
490                                 return -1;
491                         rule_acts[act_data->action_dst].action =
492                         (!!attr.group) ? jump->hws_action : jump->root_action;
493                         job->flow->jump = jump;
494                         job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
495                         break;
496                 case RTE_FLOW_ACTION_TYPE_RSS:
497                 case RTE_FLOW_ACTION_TYPE_QUEUE:
498                         hrxq = flow_hw_tir_action_register(dev,
499                                         ft_flag,
500                                         action);
501                         if (!hrxq)
502                                 return -1;
503                         rule_acts[act_data->action_dst].action = hrxq->action;
504                         job->flow->hrxq = hrxq;
505                         job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
506                         break;
507                 default:
508                         break;
509                 }
510         }
511         return 0;
512 }
513
514 /**
515  * Enqueue HW steering flow creation.
516  *
517  * The flow will be applied to the HW only if the postpone bit is not set or
518  * the extra push function is called.
519  * The flow creation status should be checked from dequeue result.
520  *
521  * @param[in] dev
522  *   Pointer to the rte_eth_dev structure.
523  * @param[in] queue
524  *   The queue to create the flow.
525  * @param[in] attr
526  *   Pointer to the flow operation attributes.
 * @param[in] table
 *   Pointer to the template table the flow is created from.
527  * @param[in] items
528  *   Items with flow spec value.
529  * @param[in] pattern_template_index
530  *   Index of the table pattern template the items follow.
531  * @param[in] actions
532  *   Actions with flow spec value.
533  * @param[in] action_template_index
534  *   Index of the table actions template the actions follow.
535  * @param[in] user_data
536  *   Pointer to the user_data.
537  * @param[out] error
538  *   Pointer to error structure.
539  *
540  * @return
541  *    Flow pointer on success, NULL otherwise and rte_errno is set.
542  */
543 static struct rte_flow *
544 flow_hw_async_flow_create(struct rte_eth_dev *dev,
545                           uint32_t queue,
546                           const struct rte_flow_op_attr *attr,
547                           struct rte_flow_template_table *table,
548                           const struct rte_flow_item items[],
549                           uint8_t pattern_template_index,
550                           const struct rte_flow_action actions[],
551                           uint8_t action_template_index,
552                           void *user_data,
553                           struct rte_flow_error *error)
554 {
555         struct mlx5_priv *priv = dev->data->dev_private;
556         struct mlx5dr_rule_attr rule_attr = {
557                 .queue_id = queue,
558                 .user_data = user_data,
559                 .burst = attr->postpone,
560         };
561         struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
562         struct mlx5_hw_actions *hw_acts;
563         struct rte_flow_hw *flow;
564         struct mlx5_hw_q_job *job;
565         uint32_t acts_num, flow_idx;
566         int ret;
567
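        /*
         * job_idx counts the free job descriptors left in the queue; zero
         * means the queue is full and completions must be pulled first.
         */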
568         if (unlikely(!priv->hw_q[queue].job_idx)) {
569                 rte_errno = ENOMEM;
570                 goto error;
571         }
572         flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
573         if (!flow)
574                 goto error;
575         /*
576          * Set the table here in order to know the destination table
577          * when freeing the flow afterwards.
578          */
579         flow->table = table;
580         flow->idx = flow_idx;
581         job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
582         /*
583          * Set the job type here in order to know if the flow memory
584          * should be freed or not when the result is dequeued.
585          */
586         job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
587         job->flow = flow;
588         job->user_data = user_data;
589         rule_attr.user_data = job;
590         hw_acts = &table->ats[action_template_index].acts;
591         /* Construct the flow action array based on the input actions. */
592         flow_hw_actions_construct(dev, job, hw_acts, actions,
593                                   rule_acts, &acts_num);
594         ret = mlx5dr_rule_create(table->matcher,
595                                  pattern_template_index, items,
596                                  rule_acts, acts_num,
597                                  &rule_attr, &flow->rule);
598         if (likely(!ret))
599                 return (struct rte_flow *)flow;
600         /* Flow creation failed, return the descriptor and flow memory. */
601         mlx5_ipool_free(table->flow, flow_idx);
602         priv->hw_q[queue].job_idx++;
603 error:
604         rte_flow_error_set(error, rte_errno,
605                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
606                            "fail to create rte flow");
607         return NULL;
608 }
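/*
 * A minimal application-side sketch of this enqueue/poll model (illustrative
 * only; port_id, queue_id, table, items and actions are assumed to have been
 * prepared through the rte_flow template/configure API beforehand):
 *
 *     struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *     struct rte_flow_op_result res[32];
 *     struct rte_flow_error err;
 *     struct rte_flow *f;
 *     int n;
 *
 *     f = rte_flow_async_create(port_id, queue_id, &op_attr, table,
 *                               items, 0, actions, 0, NULL, &err);
 *     rte_flow_push(port_id, queue_id, &err);
 *     do {
 *             n = rte_flow_pull(port_id, queue_id, res, RTE_DIM(res), &err);
 *     } while (n == 0);
 */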
609
610 /**
611  * Enqueue HW steering flow destruction.
612  *
613  * The flow will be applied to the HW only if the postpone bit is not set or
614  * the extra push function is called.
615  * The flow destruction status should be checked from dequeue result.
616  *
617  * @param[in] dev
618  *   Pointer to the rte_eth_dev structure.
619  * @param[in] queue
620  *   The queue to destroy the flow.
621  * @param[in] attr
622  *   Pointer to the flow operation attributes.
623  * @param[in] flow
624  *   Pointer to the flow to be destroyed.
625  * @param[in] user_data
626  *   Pointer to the user_data.
627  * @param[out] error
628  *   Pointer to error structure.
629  *
630  * @return
631  *    0 on success, negative value otherwise and rte_errno is set.
632  */
633 static int
634 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
635                            uint32_t queue,
636                            const struct rte_flow_op_attr *attr,
637                            struct rte_flow *flow,
638                            void *user_data,
639                            struct rte_flow_error *error)
640 {
641         struct mlx5_priv *priv = dev->data->dev_private;
642         struct mlx5dr_rule_attr rule_attr = {
643                 .queue_id = queue,
644                 .user_data = user_data,
645                 .burst = attr->postpone,
646         };
647         struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
648         struct mlx5_hw_q_job *job;
649         int ret;
650
651         if (unlikely(!priv->hw_q[queue].job_idx)) {
652                 rte_errno = ENOMEM;
653                 goto error;
654         }
655         job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
656         job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
657         job->user_data = user_data;
658         job->flow = fh;
659         rule_attr.user_data = job;
660         ret = mlx5dr_rule_destroy(&fh->rule, &rule_attr);
661         if (likely(!ret))
662                 return 0;
663         priv->hw_q[queue].job_idx++;
664 error:
665         return rte_flow_error_set(error, rte_errno,
666                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
667                         "fail to destroy rte flow");
668 }
669
670 /**
671  * Pull the enqueued flows.
672  *
673  * For flows enqueued from creation/destruction, the status should be
674  * checked from the dequeue result.
675  *
676  * @param[in] dev
677  *   Pointer to the rte_eth_dev structure.
678  * @param[in] queue
679  *   The queue to pull the result.
680  * @param[in/out] res
681  *   Array to save the results.
682  * @param[in] n_res
683  *   Maximum number of results that can be returned in the array.
684  * @param[out] error
685  *   Pointer to error structure.
686  *
687  * @return
688  *    Result number on success, negative value otherwise and rte_errno is set.
689  */
690 static int
691 flow_hw_pull(struct rte_eth_dev *dev,
692              uint32_t queue,
693              struct rte_flow_op_result res[],
694              uint16_t n_res,
695              struct rte_flow_error *error)
696 {
697         struct mlx5_priv *priv = dev->data->dev_private;
698         struct mlx5_hw_q_job *job;
699         int ret, i;
700
701         ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
702         if (ret < 0)
703                 return rte_flow_error_set(error, rte_errno,
704                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
705                                 "fail to query flow queue");
706         for (i = 0; i < ret; i++) {
707                 job = (struct mlx5_hw_q_job *)res[i].user_data;
708                 /* Restore user data. */
709                 res[i].user_data = job->user_data;
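                /*
                 * For destroy jobs, release the fate resources (jump or TIR)
                 * held by the flow and return the flow memory to the pool.
                 */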
710                 if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
711                         if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
712                                 flow_hw_jump_release(dev, job->flow->jump);
713                         else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
714                                 mlx5_hrxq_obj_release(dev, job->flow->hrxq);
715                         mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
716                 }
717                 priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
718         }
719         return ret;
720 }
721
722 /**
723  * Push the enqueued flows to HW.
724  *
725  * Force apply all the enqueued flows to the HW.
726  *
727  * @param[in] dev
728  *   Pointer to the rte_eth_dev structure.
729  * @param[in] queue
730  *   The queue to push the flow.
731  * @param[out] error
732  *   Pointer to error structure.
733  *
734  * @return
735  *    0 on success, negative value otherwise and rte_errno is set.
736  */
737 static int
738 flow_hw_push(struct rte_eth_dev *dev,
739              uint32_t queue,
740              struct rte_flow_error *error)
741 {
742         struct mlx5_priv *priv = dev->data->dev_private;
743         int ret;
744
745         ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
746                                        MLX5DR_SEND_QUEUE_ACTION_DRAIN);
747         if (ret) {
748                 rte_flow_error_set(error, rte_errno,
749                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
750                                    "fail to push flows");
751                 return ret;
752         }
753         return 0;
754 }
755
756 /**
757  * Drain the enqueued flows' completion.
758  *
759  * @param[in] dev
760  *   Pointer to the rte_eth_dev structure.
761  * @param[in] queue
762  *   The queue to pull the flow.
763  * @param[in] pending_rules
764  *   The pending flow number.
765  * @param[out] error
766  *   Pointer to error structure.
767  *
768  * @return
769  *    0 on success, negative value otherwise and rte_errno is set.
770  */
771 static int
772 __flow_hw_pull_comp(struct rte_eth_dev *dev,
773                     uint32_t queue,
774                     uint32_t pending_rules,
775                     struct rte_flow_error *error)
776 {
777         struct rte_flow_op_result comp[BURST_THR];
778         int ret, i, empty_loop = 0;
779
780         flow_hw_push(dev, queue, error);
781         while (pending_rules) {
782                 ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
783                 if (ret < 0)
784                         return -1;
785                 if (!ret) {
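                        /*
                         * No completion available yet: wait 20 ms and retry,
                         * giving up after 5 consecutive empty polls.
                         */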
786                         rte_delay_us_sleep(20000);
787                         if (++empty_loop > 5) {
788                                 DRV_LOG(WARNING, "No available dequeue, quit.");
789                                 break;
790                         }
791                         continue;
792                 }
793                 for (i = 0; i < ret; i++) {
794                         if (comp[i].status == RTE_FLOW_OP_ERROR)
795                                 DRV_LOG(WARNING, "Flow flush got an error CQE.");
796                 }
797                 if ((uint32_t)ret > pending_rules) {
798                         DRV_LOG(WARNING, "Flow flush got an extra CQE.");
799                         return rte_flow_error_set(error, ERANGE,
800                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
801                                         "get extra CQE");
802                 }
803                 pending_rules -= ret;
804                 empty_loop = 0;
805         }
806         return 0;
807 }
808
809 /**
810  * Flush created flows.
811  *
812  * @param[in] dev
813  *   Pointer to the rte_eth_dev structure.
814  * @param[out] error
815  *   Pointer to error structure.
816  *
817  * @return
818  *    0 on success, negative value otherwise and rte_errno is set.
819  */
820 int
821 flow_hw_q_flow_flush(struct rte_eth_dev *dev,
822                      struct rte_flow_error *error)
823 {
824         struct mlx5_priv *priv = dev->data->dev_private;
825         struct mlx5_hw_q *hw_q;
826         struct rte_flow_template_table *tbl;
827         struct rte_flow_hw *flow;
828         struct rte_flow_op_attr attr = {
829                 .postpone = 0,
830         };
831         uint32_t pending_rules = 0;
832         uint32_t queue;
833         uint32_t fidx;
834
835         /*
836          * Push and dequeue all the enqueued flow creation/destruction
837          * jobs in case the user forgot to dequeue; otherwise the enqueued
838          * created flows would be leaked. Forgotten dequeues would also
839          * make the flush receive unexpected extra CQEs and turn
840          * pending_rules into a negative value.
841          */
843         for (queue = 0; queue < priv->nb_queue; queue++) {
844                 hw_q = &priv->hw_q[queue];
845                 if (__flow_hw_pull_comp(dev, queue, hw_q->size - hw_q->job_idx,
846                                         error))
847                         return -1;
848         }
849         /* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
850         hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
851         LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
852                 MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
853                         if (flow_hw_async_flow_destroy(dev,
854                                                 MLX5_DEFAULT_FLUSH_QUEUE,
855                                                 &attr,
856                                                 (struct rte_flow *)flow,
857                                                 NULL,
858                                                 error))
859                                 return -1;
860                         pending_rules++;
861                         /* Drain completions once a full queue of rules is pending. */
862                         if (pending_rules >= hw_q->size) {
863                                 if (__flow_hw_pull_comp(dev,
864                                                 MLX5_DEFAULT_FLUSH_QUEUE,
865                                                 pending_rules, error))
866                                         return -1;
867                                 pending_rules = 0;
868                         }
869                 }
870         }
871         /* Drain the remaining completions. */
872         if (pending_rules &&
873             __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, pending_rules,
874                                 error))
875                 return -1;
876         return 0;
877 }
878
879 /**
880  * Create flow table.
881  *
882  * The input item and action templates will be bound to the table.
883  * Flow memory will also be allocated. The matcher will be created based
884  * on the item templates. Actions will be translated to the dedicated
885  * DR actions if possible.
886  *
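 * A rough application-level sketch (illustrative only; the pattern and
 * actions templates pt/at are assumed to have been created already):
 *
 *     struct rte_flow_template_table_attr tbl_attr = {
 *             .flow_attr = { .group = 1, .ingress = 1 },
 *             .nb_flows = 1024,
 *     };
 *     tbl = rte_flow_template_table_create(port_id, &tbl_attr, &pt, 1,
 *                                          &at, 1, &error);
 *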
887  * @param[in] dev
888  *   Pointer to the rte_eth_dev structure.
889  * @param[in] attr
890  *   Pointer to the table attributes.
891  * @param[in] item_templates
892  *   Item template array to be bound to the table.
893  * @param[in] nb_item_templates
894  *   Number of item templates.
895  * @param[in] action_templates
896  *   Action template array to be bound to the table.
897  * @param[in] nb_action_templates
898  *   Number of action templates.
899  * @param[out] error
900  *   Pointer to error structure.
901  *
902  * @return
903  *    Table on success, NULL otherwise and rte_errno is set.
904  */
905 static struct rte_flow_template_table *
906 flow_hw_table_create(struct rte_eth_dev *dev,
907                      const struct rte_flow_template_table_attr *attr,
908                      struct rte_flow_pattern_template *item_templates[],
909                      uint8_t nb_item_templates,
910                      struct rte_flow_actions_template *action_templates[],
911                      uint8_t nb_action_templates,
912                      struct rte_flow_error *error)
913 {
914         struct mlx5_priv *priv = dev->data->dev_private;
915         struct mlx5dr_matcher_attr matcher_attr = {0};
916         struct rte_flow_template_table *tbl = NULL;
917         struct mlx5_flow_group *grp;
918         struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
919         struct rte_flow_attr flow_attr = attr->flow_attr;
920         struct mlx5_flow_cb_ctx ctx = {
921                 .dev = dev,
922                 .error = error,
923                 .data = &flow_attr,
924         };
925         struct mlx5_indexed_pool_config cfg = {
926                 .size = sizeof(struct rte_flow_hw),
927                 .trunk_size = 1 << 12,
928                 .per_core_cache = 1 << 13,
929                 .need_lock = 1,
930                 .release_mem_en = !!priv->sh->config.reclaim_mode,
931                 .malloc = mlx5_malloc,
932                 .free = mlx5_free,
933                 .type = "mlx5_hw_table_flow",
934         };
935         struct mlx5_list_entry *ge;
936         uint32_t i, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
937         uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
938         int err;
939
940         /* HWS layer accepts only 1 item template with root table. */
941         if (!attr->flow_attr.group)
942                 max_tpl = 1;
943         cfg.max_idx = nb_flows;
944         /* For tables with very few flows, disable the per-core cache. */
945         if (nb_flows < cfg.trunk_size) {
946                 cfg.per_core_cache = 0;
947                 cfg.trunk_size = nb_flows;
948         }
949         /* Check if too many templates are requested. */
950         if (nb_item_templates > max_tpl ||
951             nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
952                 rte_errno = EINVAL;
953                 goto error;
954         }
955         /* Allocate the table memory. */
956         tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
957         if (!tbl)
958                 goto error;
959         /* Allocate flow indexed pool. */
960         tbl->flow = mlx5_ipool_create(&cfg);
961         if (!tbl->flow)
962                 goto error;
963         /* Register the flow group. */
964         ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
965         if (!ge)
966                 goto error;
967         grp = container_of(ge, struct mlx5_flow_group, entry);
968         tbl->grp = grp;
969         /* Prepare matcher information. */
970         matcher_attr.priority = attr->flow_attr.priority;
971         matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
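        /* nb_flows was rounded up to a power of two, so the log2 is exact. */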
972         matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
973         /* Build the item template. */
974         for (i = 0; i < nb_item_templates; i++) {
975                 uint32_t ret;
976
977                 ret = __atomic_add_fetch(&item_templates[i]->refcnt, 1,
978                                          __ATOMIC_RELAXED);
979                 if (ret <= 1) {
980                         rte_errno = EINVAL;
981                         goto it_error;
982                 }
983                 mt[i] = item_templates[i]->mt;
984                 tbl->its[i] = item_templates[i];
985         }
986         tbl->matcher = mlx5dr_matcher_create
987                 (tbl->grp->tbl, mt, nb_item_templates, &matcher_attr);
988         if (!tbl->matcher)
989                 goto it_error;
990         tbl->nb_item_templates = nb_item_templates;
991         /* Build the action template. */
992         for (i = 0; i < nb_action_templates; i++) {
993                 uint32_t ret;
994
995                 ret = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
996                                          __ATOMIC_RELAXED);
997                 if (ret <= 1) {
998                         rte_errno = EINVAL;
999                         goto at_error;
1000                 }
1001                 LIST_INIT(&tbl->ats[i].acts.act_list);
1002                 err = flow_hw_actions_translate(dev, attr,
1003                                                 &tbl->ats[i].acts,
1004                                                 action_templates[i], error);
1005                 if (err) {
1006                         i++;
1007                         goto at_error;
1008                 }
1009                 tbl->ats[i].action_template = action_templates[i];
1010         }
1011         tbl->nb_action_templates = nb_action_templates;
1012         tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
1013                     (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
1014                     MLX5DR_TABLE_TYPE_NIC_RX);
1015         LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
1016         return tbl;
1017 at_error:
1018         while (i--) {
1019                 __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
1020                 __atomic_sub_fetch(&action_templates[i]->refcnt,
1021                                    1, __ATOMIC_RELAXED);
1022         }
1023         i = nb_item_templates;
1024 it_error:
1025         while (i--)
1026                 __atomic_sub_fetch(&item_templates[i]->refcnt,
1027                                    1, __ATOMIC_RELAXED);
1028         if (tbl->matcher)
                     mlx5dr_matcher_destroy(tbl->matcher);
1029 error:
1030         err = rte_errno;
1031         if (tbl) {
1032                 if (tbl->grp)
1033                         mlx5_hlist_unregister(priv->sh->groups,
1034                                               &tbl->grp->entry);
1035                 if (tbl->flow)
1036                         mlx5_ipool_destroy(tbl->flow);
1037                 mlx5_free(tbl);
1038         }
1039         rte_flow_error_set(error, err,
1040                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1041                           "fail to create rte table");
1042         return NULL;
1043 }
1044
1045 /**
1046  * Destroy flow table.
1047  *
1048  * @param[in] dev
1049  *   Pointer to the rte_eth_dev structure.
1050  * @param[in] table
1051  *   Pointer to the table to be destroyed.
1052  * @param[out] error
1053  *   Pointer to error structure.
1054  *
1055  * @return
1056  *   0 on success, a negative errno value otherwise and rte_errno is set.
1057  */
1058 static int
1059 flow_hw_table_destroy(struct rte_eth_dev *dev,
1060                       struct rte_flow_template_table *table,
1061                       struct rte_flow_error *error)
1062 {
1063         struct mlx5_priv *priv = dev->data->dev_private;
1064         int i;
1065
1066         if (table->refcnt) {
1067                 DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
1068                 return rte_flow_error_set(error, EBUSY,
1069                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1070                                    NULL,
1071                                    "table is still in use");
1072         }
1073         LIST_REMOVE(table, next);
1074         for (i = 0; i < table->nb_item_templates; i++)
1075                 __atomic_sub_fetch(&table->its[i]->refcnt,
1076                                    1, __ATOMIC_RELAXED);
1077         for (i = 0; i < table->nb_action_templates; i++) {
1078                 if (table->ats[i].acts.mark)
1079                         flow_hw_rxq_flag_set(dev, false);
1080                 __flow_hw_action_template_destroy(dev, &table->ats[i].acts);
1081                 __atomic_sub_fetch(&table->ats[i].action_template->refcnt,
1082                                    1, __ATOMIC_RELAXED);
1083         }
1084         mlx5dr_matcher_destroy(table->matcher);
1085         mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
1086         mlx5_ipool_destroy(table->flow);
1087         mlx5_free(table);
1088         return 0;
1089 }
1090
1091 /**
1092  * Create flow action template.
1093  *
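 * A rough usage sketch (illustrative only; mark, queue, queue_mask and
 * tmpl_attr are assumed to be defined by the caller). To keep the MARK id
 * dynamic per flow while fixing the QUEUE index, the MARK mask conf is left
 * NULL and the QUEUE mask conf is provided:
 *
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action masks[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = NULL },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_mask },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     at = rte_flow_actions_template_create(port_id, &tmpl_attr,
 *                                           actions, masks, &error);
 *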
1094  * @param[in] dev
1095  *   Pointer to the rte_eth_dev structure.
1096  * @param[in] attr
1097  *   Pointer to the action template attributes.
1098  * @param[in] actions
1099  *   Associated actions (list terminated by the END action).
1100  * @param[in] masks
1101  *   List of actions that specify which fields of the corresponding action are constant.
1102  * @param[out] error
1103  *   Pointer to error structure.
1104  *
1105  * @return
1106  *   Action template pointer on success, NULL otherwise and rte_errno is set.
1107  */
1108 static struct rte_flow_actions_template *
1109 flow_hw_actions_template_create(struct rte_eth_dev *dev,
1110                         const struct rte_flow_actions_template_attr *attr,
1111                         const struct rte_flow_action actions[],
1112                         const struct rte_flow_action masks[],
1113                         struct rte_flow_error *error)
1114 {
1115         struct mlx5_priv *priv = dev->data->dev_private;
1116         int len, act_len, mask_len, i;
1117         struct rte_flow_actions_template *at;
1118
1119         act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
1120                                 NULL, 0, actions, error);
1121         if (act_len <= 0)
1122                 return NULL;
1123         len = RTE_ALIGN(act_len, 16);
1124         mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
1125                                  NULL, 0, masks, error);
1126         if (mask_len <= 0)
1127                 return NULL;
1128         len += RTE_ALIGN(mask_len, 16);
1129         at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at), 64, rte_socket_id());
1130         if (!at) {
1131                 rte_flow_error_set(error, ENOMEM,
1132                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1133                                    NULL,
1134                                    "cannot allocate action template");
1135                 return NULL;
1136         }
1137         at->attr = *attr;
1138         at->actions = (struct rte_flow_action *)(at + 1);
1139         act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions, len,
1140                                 actions, error);
1141         if (act_len <= 0)
1142                 goto error;
1143         at->masks = (struct rte_flow_action *)
1144                     (((uint8_t *)at->actions) + act_len);
1145         mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
1146                                  len - act_len, masks, error);
1147         if (mask_len <= 0)
1148                 goto error;
1149         /*
1150          * The mlx5 PMD stores the indirect action index directly in the conf
1151          * pointer, while rte_flow_conv() copies the content the pointer refers
1152          * to. Restore the indirect action index from the original conf here.
1153          */
1154         for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
1155              actions++, masks++, i++) {
1156                 if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
1157                         at->actions[i].conf = actions->conf;
1158                         at->masks[i].conf = masks->conf;
1159                 }
1160         }
1161         __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
1162         LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
1163         return at;
1164 error:
1165         mlx5_free(at);
1166         return NULL;
1167 }
1168
1169 /**
1170  * Destroy flow action template.
1171  *
1172  * @param[in] dev
1173  *   Pointer to the rte_eth_dev structure.
1174  * @param[in] template
1175  *   Pointer to the action template to be destroyed.
1176  * @param[out] error
1177  *   Pointer to error structure.
1178  *
1179  * @return
1180  *   0 on success, a negative errno value otherwise and rte_errno is set.
1181  */
1182 static int
1183 flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
1184                                  struct rte_flow_actions_template *template,
1185                                  struct rte_flow_error *error __rte_unused)
1186 {
1187         if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
1188                 DRV_LOG(WARNING, "Action template %p is still in use.",
1189                         (void *)template);
1190                 return rte_flow_error_set(error, EBUSY,
1191                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1192                                    NULL,
1193                                    "action template is still in use");
1194         }
1195         LIST_REMOVE(template, next);
1196         mlx5_free(template);
1197         return 0;
1198 }
1199
1200 /**
1201  * Create flow item template.
1202  *
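 * A rough usage sketch (illustrative only; pt_attr and eth_mask are assumed
 * to be defined by the caller, the mask selecting the fields to match):
 *
 *     const struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH, .mask = &eth_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     pt = rte_flow_pattern_template_create(port_id, &pt_attr, items,
 *                                           &error);
 *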
1203  * @param[in] dev
1204  *   Pointer to the rte_eth_dev structure.
1205  * @param[in] attr
1206  *   Pointer to the item template attributes.
1207  * @param[in] items
1208  *   The template item pattern.
1209  * @param[out] error
1210  *   Pointer to error structure.
1211  *
1212  * @return
1213  *  Item template pointer on success, NULL otherwise and rte_errno is set.
1214  */
1215 static struct rte_flow_pattern_template *
1216 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
1217                              const struct rte_flow_pattern_template_attr *attr,
1218                              const struct rte_flow_item items[],
1219                              struct rte_flow_error *error)
1220 {
1221         struct mlx5_priv *priv = dev->data->dev_private;
1222         struct rte_flow_pattern_template *it;
1223
1224         it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
1225         if (!it) {
1226                 rte_flow_error_set(error, ENOMEM,
1227                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1228                                    NULL,
1229                                    "cannot allocate item template");
1230                 return NULL;
1231         }
1232         it->attr = *attr;
1233         it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
1234         if (!it->mt) {
1235                 mlx5_free(it);
1236                 rte_flow_error_set(error, rte_errno,
1237                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1238                                    NULL,
1239                                    "cannot create match template");
1240                 return NULL;
1241         }
1242         __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
1243         LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
1244         return it;
1245 }
1246
1247 /**
1248  * Destroy flow item template.
1249  *
1250  * @param[in] dev
1251  *   Pointer to the rte_eth_dev structure.
1252  * @param[in] template
1253  *   Pointer to the item template to be destroyed.
1254  * @param[out] error
1255  *   Pointer to error structure.
1256  *
1257  * @return
1258  *   0 on success, a negative errno value otherwise and rte_errno is set.
1259  */
1260 static int
1261 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
1262                               struct rte_flow_pattern_template *template,
1263                               struct rte_flow_error *error __rte_unused)
1264 {
1265         if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
1266                 DRV_LOG(WARNING, "Item template %p is still in use.",
1267                         (void *)template);
1268                 return rte_flow_error_set(error, EBUSY,
1269                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1270                                    NULL,
1271                                    "item template is still in use");
1272         }
1273         LIST_REMOVE(template, next);
1274         claim_zero(mlx5dr_match_template_destroy(template->mt));
1275         mlx5_free(template);
1276         return 0;
1277 }
1278
1279 /**
1280  * Get information about HWS pre-configurable resources.
1281  *
1282  * @param[in] dev
1283  *   Pointer to the rte_eth_dev structure.
1284  * @param[out] port_info
1285  *   Pointer to port information.
1286  * @param[out] queue_info
1287  *   Pointer to queue information.
1288  * @param[out] error
1289  *   Pointer to error structure.
1290  *
1291  * @return
1292  *   0 on success, a negative errno value otherwise and rte_errno is set.
1293  */
1294 static int
1295 flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
1296                  struct rte_flow_port_info *port_info,
1297                  struct rte_flow_queue_info *queue_info,
1298                  struct rte_flow_error *error __rte_unused)
1299 {
1300         /* Nothing to be updated currently. */
1301         memset(port_info, 0, sizeof(*port_info));
1302         /* Queue size is unlimited from low-level. */
1303         queue_info->max_size = UINT32_MAX;
1304         return 0;
1305 }
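/*
 * A rough application-side configuration sketch (illustrative only): the
 * information reported above is queried first, then the port is configured
 * with the desired number and size of flow queues before any template or
 * table is created:
 *
 *     struct rte_flow_port_info port_info;
 *     struct rte_flow_queue_info queue_info;
 *     struct rte_flow_port_attr port_attr = { 0 };
 *     struct rte_flow_queue_attr queue_attr = { .size = 128 };
 *     const struct rte_flow_queue_attr *queue_attrs[] = { &queue_attr };
 *
 *     rte_flow_info_get(port_id, &port_info, &queue_info, &error);
 *     rte_flow_configure(port_id, &port_attr, 1, queue_attrs, &error);
 */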
1306
1307 /**
1308  * Create group callback.
1309  *
1310  * @param[in] tool_ctx
1311  *   Pointer to the hash list related context.
1312  * @param[in] cb_ctx
1313  *   Pointer to the group creation context.
1314  *
1315  * @return
1316  *   Group entry on success, NULL otherwise and rte_errno is set.
1317  */
1318 struct mlx5_list_entry *
1319 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
1320 {
1321         struct mlx5_dev_ctx_shared *sh = tool_ctx;
1322         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
1323         struct rte_eth_dev *dev = ctx->dev;
1324         struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
1325         struct mlx5_priv *priv = dev->data->dev_private;
1326         struct mlx5dr_table_attr dr_tbl_attr = {0};
1327         struct rte_flow_error *error = ctx->error;
1328         struct mlx5_flow_group *grp_data;
1329         struct mlx5dr_table *tbl = NULL;
1330         struct mlx5dr_action *jump;
1331         uint32_t idx = 0;
1332
1333         grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
1334         if (!grp_data) {
1335                 rte_flow_error_set(error, ENOMEM,
1336                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1337                                    NULL,
1338                                    "cannot allocate flow table data entry");
1339                 return NULL;
1340         }
1341         dr_tbl_attr.level = attr->group;
1342         if (attr->transfer)
1343                 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
1344         else if (attr->egress)
1345                 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
1346         else
1347                 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
1348         tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
1349         if (!tbl)
1350                 goto error;
1351         grp_data->tbl = tbl;
1352         if (attr->group) {
1353                 /* Jump action to be used by non-root tables. */
1354                 jump = mlx5dr_action_create_dest_table
1355                         (priv->dr_ctx, tbl,
1356                          mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
1357                 if (!jump)
1358                         goto error;
1359                 grp_data->jump.hws_action = jump;
1360                 /* Jump action to be used by the root table. */
1361                 jump = mlx5dr_action_create_dest_table
1362                         (priv->dr_ctx, tbl,
1363                          mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
1364                                          [dr_tbl_attr.type]);
1365                 if (!jump)
1366                         goto error;
1367                 grp_data->jump.root_action = jump;
1368         }
1369         grp_data->idx = idx;
1370         grp_data->group_id = attr->group;
1371         grp_data->type = dr_tbl_attr.type;
1372         return &grp_data->entry;
1373 error:
1374         if (grp_data->jump.root_action)
1375                 mlx5dr_action_destroy(grp_data->jump.root_action);
1376         if (grp_data->jump.hws_action)
1377                 mlx5dr_action_destroy(grp_data->jump.hws_action);
1378         if (tbl)
1379                 mlx5dr_table_destroy(tbl);
1380         if (idx)
1381                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
1382         rte_flow_error_set(error, ENOMEM,
1383                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1384                            NULL,
1385                            "cannot allocate flow dr table");
1386         return NULL;
1387 }
1388
1389 /**
1390  * Remove group callback.
1391  *
1392  * @param[in] tool_ctx
1393  *   Pointer to the hash list related context.
1394  * @param[in] entry
1395  *   Pointer to the entry to be removed.
1396  */
1397 void
1398 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
1399 {
1400         struct mlx5_dev_ctx_shared *sh = tool_ctx;
1401         struct mlx5_flow_group *grp_data =
1402                     container_of(entry, struct mlx5_flow_group, entry);
1403
1404         MLX5_ASSERT(entry && sh);
1405         /* TODO: use the wrapper glue functions instead of direct mlx5dr calls. */
1406         if (grp_data->jump.hws_action)
1407                 mlx5dr_action_destroy(grp_data->jump.hws_action);
1408         if (grp_data->jump.root_action)
1409                 mlx5dr_action_destroy(grp_data->jump.root_action);
1410         mlx5dr_table_destroy(grp_data->tbl);
1411         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
1412 }
1413
1414 /**
1415  * Match group callback.
1416  *
1417  * @param[in] tool_ctx
1418  *   Pointer to the hash list related context.
1419  * @param[in] entry
1420  *   Pointer to the group to be matched.
1421  * @param[in] cb_ctx
1422  *   Pointer to the group matching context.
1423  *
1424  * @return
1425  *   0 on match, 1 on mismatch.
1426  */
1427 int
1428 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
1429                      void *cb_ctx)
1430 {
1431         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
1432         struct mlx5_flow_group *grp_data =
1433                 container_of(entry, struct mlx5_flow_group, entry);
1434         struct rte_flow_attr *attr =
1435                         (struct rte_flow_attr *)ctx->data;
1436
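        /*
         * Match only when the group ID is equal and the table type derived
         * from the attributes (transfer/egress/ingress) is the same; any
         * difference yields a non-zero (mismatch) result.
         */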
1437         return (grp_data->group_id != attr->group) ||
1438                 ((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
1439                 attr->transfer) ||
1440                 ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
1441                 attr->egress) ||
1442                 ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
1443                 attr->ingress);
1444 }
1445
1446 /**
1447  * Clone group entry callback.
1448  *
1449  * @param[in] tool_ctx
1450  *   Pointer to the hash list related context.
1451  * @param[in] oentry
1452  *   Pointer to the group entry to be cloned.
1453  * @param[in] cb_ctx
1454  *   Pointer to the group creation context.
1455  *
1456  * @return
1457  *   Cloned group entry on success, NULL otherwise and rte_errno is set.
1458  */
1459 struct mlx5_list_entry *
1460 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
1461                      void *cb_ctx)
1462 {
1463         struct mlx5_dev_ctx_shared *sh = tool_ctx;
1464         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
1465         struct mlx5_flow_group *grp_data;
1466         struct rte_flow_error *error = ctx->error;
1467         uint32_t idx = 0;
1468
1469         grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
1470         if (!grp_data) {
1471                 rte_flow_error_set(error, ENOMEM,
1472                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1473                                    NULL,
1474                                    "cannot allocate flow table data entry");
1475                 return NULL;
1476         }
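        /*
         * Shallow copy: the clone shares the DR table and jump actions with
         * the original entry, only the ipool index is its own.
         */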
1477         memcpy(grp_data, oentry, sizeof(*grp_data));
1478         grp_data->idx = idx;
1479         return &grp_data->entry;
1480 }
1481
1482 /**
1483  * Free cloned group entry callback.
1484  *
1485  * @param[in] tool_ctx
1486  *   Pointer to the hash list related context.
1487  * @param[in] entry
1488  *   Pointer to the group to be freed.
1489  */
1490 void
1491 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
1492 {
1493         struct mlx5_dev_ctx_shared *sh = tool_ctx;
1494         struct mlx5_flow_group *grp_data =
1495                     container_of(entry, struct mlx5_flow_group, entry);
1496
1497         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
1498 }
1499
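/*
 * Illustration only (not part of the driver): flow_hw_configure() below is
 * reached through the generic rte_flow_configure() API, roughly as follows
 * on the application side (names and sizes are arbitrary examples):
 *
 *	const struct rte_flow_port_attr port_attr = { .nb_counters = 0 };
 *	const struct rte_flow_queue_attr queue_attr = { .size = 64 };
 *	const struct rte_flow_queue_attr *queue_attrs[] = { &queue_attr };
 *	struct rte_flow_error error;
 *
 *	ret = rte_flow_configure(port_id, &port_attr, 1, queue_attrs, &error);
 */
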
1500 /**
1501  * Configure port HWS resources.
1502  *
1503  * @param[in] dev
1504  *   Pointer to the rte_eth_dev structure.
1505  * @param[in] port_attr
1506  *   Port configuration attributes.
1507  * @param[in] nb_queue
1508  *   Number of queues.
1509  * @param[in] queue_attr
1510  *   Array that holds attributes for each flow queue.
1511  * @param[out] error
1512  *   Pointer to error structure.
1513  *
1514  * @return
1515  *   0 on success, a negative errno value otherwise and rte_errno is set.
1516  */
1517
1518 static int
1519 flow_hw_configure(struct rte_eth_dev *dev,
1520                   const struct rte_flow_port_attr *port_attr,
1521                   uint16_t nb_queue,
1522                   const struct rte_flow_queue_attr *queue_attr[],
1523                   struct rte_flow_error *error)
1524 {
1525         struct mlx5_priv *priv = dev->data->dev_private;
1526         struct mlx5dr_context *dr_ctx = NULL;
1527         struct mlx5dr_context_attr dr_ctx_attr = {0};
1528         struct mlx5_hw_q *hw_q;
1529         struct mlx5_hw_q_job *job = NULL;
1530         uint32_t mem_size, i, j;
1531         struct mlx5_indexed_pool_config cfg = {
1532                 .size = sizeof(struct rte_flow_hw),
1533                 .trunk_size = 4096,
1534                 .need_lock = 1,
1535                 .release_mem_en = !!priv->sh->config.reclaim_mode,
1536                 .malloc = mlx5_malloc,
1537                 .free = mlx5_free,
1538                 .type = "mlx5_hw_action_construct_data",
1539         };
1540
1541         if (!port_attr || !nb_queue || !queue_attr) {
1542                 rte_errno = EINVAL;
1543                 goto err;
1544         }
1545         /* In case of re-configuration, release the existing context first. */
1546         if (priv->dr_ctx) {
1548                 for (i = 0; i < nb_queue; i++) {
1549                         hw_q = &priv->hw_q[i];
1550                         /* Make sure the queue is idle (no jobs in flight). */
1551                         if (hw_q->size != hw_q->job_idx) {
1552                                 rte_errno = EBUSY;
1553                                 goto err;
1554                         }
1555                 }
1556                 flow_hw_resource_release(dev);
1557         }
1558         priv->acts_ipool = mlx5_ipool_create(&cfg);
1559         if (!priv->acts_ipool)
1560                 goto err;
1561         /* Allocate the queue job descriptor LIFO. */
1562         mem_size = sizeof(priv->hw_q[0]) * nb_queue;
1563         for (i = 0; i < nb_queue; i++) {
1564                 /*
1565                  * Check that all queues have the same size, as
1566                  * required by the HWS layer.
1567                  */
1568                 if (queue_attr[i]->size != queue_attr[0]->size) {
1569                         rte_errno = EINVAL;
1570                         goto err;
1571                 }
1572                 mem_size += (sizeof(struct mlx5_hw_q_job *) +
1573                             sizeof(struct mlx5_hw_q_job)) *
1574                             queue_attr[0]->size;
1575         }
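        /*
         * The single allocation below is laid out as: the array of
         * mlx5_hw_q structures, then, for each queue, an array of job
         * pointers followed by the job descriptors they point to.
         */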
1576         priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
1577                                  64, SOCKET_ID_ANY);
1578         if (!priv->hw_q) {
1579                 rte_errno = ENOMEM;
1580                 goto err;
1581         }
1582         for (i = 0; i < nb_queue; i++) {
1583                 priv->hw_q[i].job_idx = queue_attr[i]->size;
1584                 priv->hw_q[i].size = queue_attr[i]->size;
1585                 if (i == 0)
1586                         priv->hw_q[i].job = (struct mlx5_hw_q_job **)
1587                                             &priv->hw_q[nb_queue];
1588                 else
1589                         priv->hw_q[i].job = (struct mlx5_hw_q_job **)
1590                                             &job[queue_attr[i - 1]->size];
1591                 job = (struct mlx5_hw_q_job *)
1592                       &priv->hw_q[i].job[queue_attr[i]->size];
1593                 for (j = 0; j < queue_attr[i]->size; j++)
1594                         priv->hw_q[i].job[j] = &job[j];
1595         }
1596         dr_ctx_attr.pd = priv->sh->cdev->pd;
1597         dr_ctx_attr.queues = nb_queue;
1598         /* Queue sizes should all be the same. Take the first one. */
1599         dr_ctx_attr.queue_size = queue_attr[0]->size;
1600         dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
1601         /* rte_errno has been updated by the HWS layer. */
1602         if (!dr_ctx)
1603                 goto err;
1604         priv->dr_ctx = dr_ctx;
1605         priv->nb_queue = nb_queue;
1606         /* Add global actions. */
1607         for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1608                 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1609                         priv->hw_drop[i][j] = mlx5dr_action_create_dest_drop
1610                                 (priv->dr_ctx, mlx5_hw_act_flag[i][j]);
1611                         if (!priv->hw_drop[i][j])
1612                                 goto err;
1613                 }
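                /*
                 * The tag (mark) action is used only on the RX path, so a
                 * single action per flag created with the RX flag (index 0)
                 * is enough.
                 */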
1614                 priv->hw_tag[i] = mlx5dr_action_create_tag
1615                         (priv->dr_ctx, mlx5_hw_act_flag[i][0]);
1616                 if (!priv->hw_tag[i])
1617                         goto err;
1618         }
1619         return 0;
1620 err:
1621         for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1622                 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1623                         if (priv->hw_drop[i][j])
1624                                 mlx5dr_action_destroy(priv->hw_drop[i][j]);
1625                 }
1626                 if (priv->hw_tag[i])
1627                         mlx5dr_action_destroy(priv->hw_tag[i]);
1628         }
1629         if (dr_ctx)
1630                 claim_zero(mlx5dr_context_close(dr_ctx));
1631         mlx5_free(priv->hw_q);
1632         priv->hw_q = NULL;
1633         if (priv->acts_ipool) {
1634                 mlx5_ipool_destroy(priv->acts_ipool);
1635                 priv->acts_ipool = NULL;
1636         }
1637         return rte_flow_error_set(error, rte_errno,
1638                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1639                                   "failed to configure port");
1640 }
1641
1642 /**
1643  * Release HWS resources.
1644  *
1645  * @param[in] dev
1646  *   Pointer to the rte_eth_dev structure.
1647  */
1648 void
1649 flow_hw_resource_release(struct rte_eth_dev *dev)
1650 {
1651         struct mlx5_priv *priv = dev->data->dev_private;
1652         struct rte_flow_template_table *tbl;
1653         struct rte_flow_pattern_template *it;
1654         struct rte_flow_actions_template *at;
1655         int i, j;
1656
1657         if (!priv->dr_ctx)
1658                 return;
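        /*
         * Destroy template tables first: they hold references to the
         * pattern and actions templates released right after.
         */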
1659         while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
1660                 tbl = LIST_FIRST(&priv->flow_hw_tbl);
1661                 flow_hw_table_destroy(dev, tbl, NULL);
1662         }
1663         while (!LIST_EMPTY(&priv->flow_hw_itt)) {
1664                 it = LIST_FIRST(&priv->flow_hw_itt);
1665                 flow_hw_pattern_template_destroy(dev, it, NULL);
1666         }
1667         while (!LIST_EMPTY(&priv->flow_hw_at)) {
1668                 at = LIST_FIRST(&priv->flow_hw_at);
1669                 flow_hw_actions_template_destroy(dev, at, NULL);
1670         }
1671         for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1672                 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1673                         if (priv->hw_drop[i][j])
1674                                 mlx5dr_action_destroy(priv->hw_drop[i][j]);
1675                 }
1676                 if (priv->hw_tag[i])
1677                         mlx5dr_action_destroy(priv->hw_tag[i]);
1678         }
1679         if (priv->acts_ipool) {
1680                 mlx5_ipool_destroy(priv->acts_ipool);
1681                 priv->acts_ipool = NULL;
1682         }
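        /*
         * The queue array and all per-queue job storage were allocated as
         * a single block, one free releases everything.
         */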
1683         mlx5_free(priv->hw_q);
1684         priv->hw_q = NULL;
1685         claim_zero(mlx5dr_context_close(priv->dr_ctx));
1686         priv->dr_ctx = NULL;
1687         priv->nb_queue = 0;
1688 }
1689
1690 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
1691         .info_get = flow_hw_info_get,
1692         .configure = flow_hw_configure,
1693         .pattern_template_create = flow_hw_pattern_template_create,
1694         .pattern_template_destroy = flow_hw_pattern_template_destroy,
1695         .actions_template_create = flow_hw_actions_template_create,
1696         .actions_template_destroy = flow_hw_actions_template_destroy,
1697         .template_table_create = flow_hw_table_create,
1698         .template_table_destroy = flow_hw_table_destroy,
1699         .async_flow_create = flow_hw_async_flow_create,
1700         .async_flow_destroy = flow_hw_async_flow_destroy,
1701         .pull = flow_hw_pull,
1702         .push = flow_hw_push,
1703 };
1704
1705 #endif