1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
7 #include <mlx5_malloc.h>
12 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
14 /* The maximum actions support in the flow. */
15 #define MLX5_HW_MAX_ACTS 16
17 /* Default push burst threshold. */
20 /* Default queue to flush the flows. */
21 #define MLX5_DEFAULT_FLUSH_QUEUE 0
23 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;

25 /* DR action flags with different table. */
/*
 * Lookup table of DR action flags, indexed by
 * [root vs. HWS scope][MLX5DR_TABLE_TYPE_*] (RX / TX / FDB).
 * NOTE(review): the initializer braces are elided in this excerpt.
 */
26 static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
27 [MLX5DR_TABLE_TYPE_MAX] = {
/* Root-table action flags per table type. */
29 MLX5DR_ACTION_FLAG_ROOT_RX,
30 MLX5DR_ACTION_FLAG_ROOT_TX,
31 MLX5DR_ACTION_FLAG_ROOT_FDB,
/* HW-steering (non-root) action flags per table type. */
34 MLX5DR_ACTION_FLAG_HWS_RX,
35 MLX5DR_ACTION_FLAG_HWS_TX,
36 MLX5DR_ACTION_FLAG_HWS_FDB,
/*
 * Set the Rx queue MARK flag on every Rx queue of the port.
 * NOTE(review): excerpt is line-sampled; some statements are elided.
 */
44 * Pointer to the rte_eth_dev structure.
46 * Flag to enable or not.
49 flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)
51 struct mlx5_priv *priv = dev->data->dev_private;
/* Early return when the flag already matches the requested state. */
54 if ((!priv->mark_enabled && !enable) ||
55 (priv->mark_enabled && enable))
/* Propagate the mark flag to every configured Rx queue. */
57 for (i = 0; i < priv->rxqs_n; ++i) {
58 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
60 rxq_ctrl->rxq.mark = enable;
/* Cache port-level state so repeated calls are cheap. */
62 priv->mark_enabled = enable;
66 * Register destination table DR jump action.
69 * Pointer to the rte_eth_dev structure.
70 * @param[in] table_attr
71 * Pointer to the flow attributes.
72 * @param[in] dest_group
73 * The destination group ID.
75 * Pointer to error structure.
78 * Table on success, NULL otherwise and rte_errno is set.
80 static struct mlx5_hw_jump_action *
81 flow_hw_jump_action_register(struct rte_eth_dev *dev,
82 const struct rte_flow_attr *attr,
84 struct rte_flow_error *error)
86 struct mlx5_priv *priv = dev->data->dev_private;
/* Copy caller attributes; only the group is overridden below. */
87 struct rte_flow_attr jattr = *attr;
88 struct mlx5_flow_group *grp;
89 struct mlx5_flow_cb_ctx ctx = {
94 struct mlx5_list_entry *ge;
96 jattr.group = dest_group;
/* Reference-counted lookup/create of the destination group entry. */
97 ge = mlx5_hlist_register(priv->sh->flow_tbls, dest_group, &ctx);
100 grp = container_of(ge, struct mlx5_flow_group, entry);
105 * Release jump action.
108 * Pointer to the rte_eth_dev structure.
110 * Pointer to the jump action.
114 flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
116 struct mlx5_priv *priv = dev->data->dev_private;
117 struct mlx5_flow_group *grp;
/* Recover the owning group from the embedded jump member. */
120 (jump, struct mlx5_flow_group, jump);
/* Drop the group reference taken by flow_hw_jump_action_register(). */
121 mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
125 * Register queue/RSS action.
128 * Pointer to the rte_eth_dev structure.
129 * @param[in] hws_flags
135 * Table on success, NULL otherwise and rte_errno is set.
137 static inline struct mlx5_hrxq*
138 flow_hw_tir_action_register(struct rte_eth_dev *dev,
140 const struct rte_flow_action *action)
/* Build an RSS descriptor from either a QUEUE or an RSS action. */
142 struct mlx5_flow_rss_desc rss_desc = {
143 .hws_flags = hws_flags,
145 struct mlx5_hrxq *hrxq;
147 if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
148 const struct rte_flow_action_queue *queue = action->conf;
/* Single-queue "RSS" over exactly one Rx queue. */
150 rss_desc.const_q = &queue->index;
151 rss_desc.queue_num = 1;
153 const struct rte_flow_action_rss *rss = action->conf;
155 rss_desc.queue_num = rss->queue_num;
156 rss_desc.const_q = rss->queue;
/* Fall back to the driver default hash key when none is given. */
158 !rss->key ? rss_hash_default_key : rss->key,
159 MLX5_RSS_HASH_KEY_LEN);
160 rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
161 rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
162 flow_dv_hashfields_set(0, &rss_desc, &rss_desc.hash_fields);
163 flow_dv_action_rss_l34_hash_adjust(rss->types,
164 &rss_desc.hash_fields);
/* Level > 1 requests hashing on the inner (tunneled) headers. */
165 if (rss->level > 1) {
166 rss_desc.hash_fields |= IBV_RX_HASH_INNER;
/* Get-or-create the hash Rx queue object for this descriptor. */
170 hrxq = mlx5_hrxq_get(dev, &rss_desc);
175 * Destroy DR actions created by action template.
177 * For DR actions created during table creation's action translate.
178 * Need to destroy the DR action when destroying the table.
181 * Pointer to the rte_eth_dev structure.
183 * Pointer to the template HW steering DR actions.
186 __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
187 struct mlx5_hw_actions *acts)
189 struct mlx5_priv *priv = dev->data->dev_private;
192 struct mlx5_flow_group *grp;
/* Release the jump destination group referenced by these actions. */
195 (acts->jump, struct mlx5_flow_group, jump);
196 mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
202 * Append dynamic action to the dynamic action list.
205 * Pointer to the port private data structure.
207 * Pointer to the template HW steering DR actions.
210 * @param[in] action_src
211 * Offset of source rte flow action.
212 * @param[in] action_dst
213 * Offset of destination DR action.
216 * 0 on success, negative value otherwise and rte_errno is set.
/* Allocate one dynamic-action descriptor from the per-port ipool. */
218 static __rte_always_inline struct mlx5_action_construct_data *
219 __flow_hw_act_data_alloc(struct mlx5_priv *priv,
220 enum rte_flow_action_type type,
224 struct mlx5_action_construct_data *act_data;
227 act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
/* Record where to read the rte_flow action and where to write the DR action. */
231 act_data->type = type;
232 act_data->action_src = action_src;
233 act_data->action_dst = action_dst;
238 * Append dynamic action to the dynamic action list.
241 * Pointer to the port private data structure.
243 * Pointer to the template HW steering DR actions.
246 * @param[in] action_src
247 * Offset of source rte flow action.
248 * @param[in] action_dst
249 * Offset of destination DR action.
252 * 0 on success, negative value otherwise and rte_errno is set.
254 static __rte_always_inline int
255 __flow_hw_act_data_general_append(struct mlx5_priv *priv,
256 struct mlx5_hw_actions *acts,
257 enum rte_flow_action_type type,
260 { struct mlx5_action_construct_data *act_data;
/* Allocate the descriptor and link it into the template's dynamic list. */
262 act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
265 LIST_INSERT_HEAD(&acts->act_list, act_data, next);
270 * Translate rte_flow actions to DR action.
272 * As the action template has already indicated the actions. Translate
273 * the rte_flow actions to DR action if possible. So in flow create
274 * stage we will save cycles from handling the actions' organizing.
275 * For the actions with limited information, need to add these to a
279 * Pointer to the rte_eth_dev structure.
280 * @param[in] table_attr
281 * Pointer to the table attributes.
282 * @param[in] item_templates
283 * Item template array to be bound to the table.
284 * @param[in/out] acts
285 * Pointer to the template HW steering DR actions.
289 * Pointer to error structure.
292 * Table on success, NULL otherwise and rte_errno is set.
295 flow_hw_actions_translate(struct rte_eth_dev *dev,
296 const struct rte_flow_template_table_attr *table_attr,
297 struct mlx5_hw_actions *acts,
298 struct rte_flow_actions_template *at,
299 struct rte_flow_error *error)
301 struct mlx5_priv *priv = dev->data->dev_private;
302 const struct rte_flow_attr *attr = &table_attr->flow_attr;
303 struct rte_flow_action *actions = at->actions;
304 struct rte_flow_action *action_start = actions;
305 struct rte_flow_action *masks = at->masks;
306 bool actions_end = false;
/* Derive the DR table type from the flow attributes. */
311 type = MLX5DR_TABLE_TYPE_FDB;
312 else if (attr->egress)
313 type = MLX5DR_TABLE_TYPE_NIC_TX;
315 type = MLX5DR_TABLE_TYPE_NIC_RX;
/*
 * Walk actions and masks in lockstep: fully-masked actions are
 * translated now; partially-masked ones are appended to the dynamic
 * list for per-flow construction later.
 */
316 for (i = 0; !actions_end; actions++, masks++) {
317 switch (actions->type) {
318 case RTE_FLOW_ACTION_TYPE_INDIRECT:
320 case RTE_FLOW_ACTION_TYPE_VOID:
322 case RTE_FLOW_ACTION_TYPE_DROP:
323 acts->rule_acts[i++].action =
324 priv->hw_drop[!!attr->group][type];
326 case RTE_FLOW_ACTION_TYPE_MARK:
/* Fully-masked MARK: bake the tag value into the rule action. */
329 acts->rule_acts[i].tag.value =
331 (((const struct rte_flow_action_mark *)
333 else if (__flow_hw_act_data_general_append(priv, acts,
334 actions->type, actions - action_start, i))
336 acts->rule_acts[i++].action =
337 priv->hw_tag[!!attr->group];
/* MARK requires the Rx queues to deliver the mark flag. */
338 flow_hw_rxq_flag_set(dev, true);
340 case RTE_FLOW_ACTION_TYPE_JUMP:
342 uint32_t jump_group =
343 ((const struct rte_flow_action_jump *)
344 actions->conf)->group;
345 acts->jump = flow_hw_jump_action_register
346 (dev, attr, jump_group, error);
/* Root tables use the root action flavor, others the HWS one. */
349 acts->rule_acts[i].action = (!!attr->group) ?
350 acts->jump->hws_action :
351 acts->jump->root_action;
352 } else if (__flow_hw_act_data_general_append
353 (priv, acts, actions->type,
354 actions - action_start, i)){
359 case RTE_FLOW_ACTION_TYPE_QUEUE:
361 acts->tir = flow_hw_tir_action_register
363 mlx5_hw_act_flag[!!attr->group][type],
367 acts->rule_acts[i].action =
369 } else if (__flow_hw_act_data_general_append
370 (priv, acts, actions->type,
371 actions - action_start, i)) {
376 case RTE_FLOW_ACTION_TYPE_RSS:
378 acts->tir = flow_hw_tir_action_register
380 mlx5_hw_act_flag[!!attr->group][type],
384 acts->rule_acts[i].action =
386 } else if (__flow_hw_act_data_general_append
387 (priv, acts, actions->type,
388 actions - action_start, i)) {
393 case RTE_FLOW_ACTION_TYPE_END:
/* Error path: undo partial translation before reporting. */
404 __flow_hw_action_template_destroy(dev, acts);
405 return rte_flow_error_set(error, err,
406 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
407 "fail to create rte table");
411 * Construct flow action array.
413 * For action template contains dynamic actions, these actions need to
414 * be updated according to the rte_flow action during flow creation.
417 * Pointer to the rte_eth_dev structure.
419 * Pointer to job descriptor.
421 * Pointer to translated actions from template.
423 * Array of rte_flow action need to be checked.
424 * @param[in] rule_acts
425 * Array of DR rule actions to be used during flow creation.
426 * @param[in] acts_num
427 * Pointer to the real acts_num flow has.
430 * 0 on success, negative value otherwise and rte_errno is set.
432 static __rte_always_inline int
433 flow_hw_actions_construct(struct rte_eth_dev *dev,
434 struct mlx5_hw_q_job *job,
435 struct mlx5_hw_actions *hw_acts,
436 const struct rte_flow_action actions[],
437 struct mlx5dr_rule_action *rule_acts,
440 struct rte_flow_template_table *table = job->flow->table;
441 struct mlx5_action_construct_data *act_data;
442 const struct rte_flow_action *action;
443 struct rte_flow_attr attr = {
/* Start from the pre-translated template actions. */
448 memcpy(rule_acts, hw_acts->rule_acts,
449 sizeof(*rule_acts) * hw_acts->acts_num);
450 *acts_num = hw_acts->acts_num;
/* Nothing dynamic to patch — the template copy is complete. */
451 if (LIST_EMPTY(&hw_acts->act_list))
453 attr.group = table->grp->group_id;
454 ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
455 if (table->type == MLX5DR_TABLE_TYPE_FDB) {
458 } else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
/* Patch each dynamic slot from the caller-provided action array. */
464 LIST_FOREACH(act_data, &hw_acts->act_list, next) {
467 struct mlx5_hw_jump_action *jump;
468 struct mlx5_hrxq *hrxq;
470 action = &actions[act_data->action_src];
471 MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||
472 (int)action->type == act_data->type);
473 switch (action->type) {
474 case RTE_FLOW_ACTION_TYPE_INDIRECT:
476 case RTE_FLOW_ACTION_TYPE_VOID:
478 case RTE_FLOW_ACTION_TYPE_MARK:
479 tag = mlx5_flow_mark_set
480 (((const struct rte_flow_action_mark *)
481 (action->conf))->id);
482 rule_acts[act_data->action_dst].tag.value = tag;
484 case RTE_FLOW_ACTION_TYPE_JUMP:
485 jump_group = ((const struct rte_flow_action_jump *)
486 action->conf)->group;
487 jump = flow_hw_jump_action_register
488 (dev, &attr, jump_group, NULL);
491 rule_acts[act_data->action_dst].action =
492 (!!attr.group) ? jump->hws_action : jump->root_action;
/* Remember the fate resource so the flow can release it on destroy. */
493 job->flow->jump = jump;
494 job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
496 case RTE_FLOW_ACTION_TYPE_RSS:
497 case RTE_FLOW_ACTION_TYPE_QUEUE:
498 hrxq = flow_hw_tir_action_register(dev,
503 rule_acts[act_data->action_dst].action = hrxq->action;
504 job->flow->hrxq = hrxq;
505 job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
515 * Enqueue HW steering flow creation.
517 * The flow will be applied to the HW only if the postpone bit is not set or
518 * the extra push function is called.
519 * The flow creation status should be checked from dequeue result.
522 * Pointer to the rte_eth_dev structure.
524 * The queue to create the flow.
526 * Pointer to the flow operation attributes.
528 * Items with flow spec value.
529 * @param[in] pattern_template_index
530 * The item pattern flow follows from the table.
532 * Action with flow spec value.
533 * @param[in] action_template_index
534 * The action pattern flow follows from the table.
535 * @param[in] user_data
536 * Pointer to the user_data.
538 * Pointer to error structure.
541 * Flow pointer on success, NULL otherwise and rte_errno is set.
543 static struct rte_flow *
544 flow_hw_async_flow_create(struct rte_eth_dev *dev,
546 const struct rte_flow_op_attr *attr,
547 struct rte_flow_template_table *table,
548 const struct rte_flow_item items[],
549 uint8_t pattern_template_index,
550 const struct rte_flow_action actions[],
551 uint8_t action_template_index,
553 struct rte_flow_error *error)
555 struct mlx5_priv *priv = dev->data->dev_private;
556 struct mlx5dr_rule_attr rule_attr = {
558 .user_data = user_data,
/* Postpone maps to DR burst mode: push happens later. */
559 .burst = attr->postpone,
561 struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
562 struct mlx5_hw_actions *hw_acts;
563 struct rte_flow_hw *flow;
564 struct mlx5_hw_q_job *job;
565 uint32_t acts_num, flow_idx;
/* No free job descriptor means the queue is full. */
568 if (unlikely(!priv->hw_q[queue].job_idx)) {
572 flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
576 * Set the table here in order to know the destination table
577 * when free the flow afterwards.
580 flow->idx = flow_idx;
/* Pop a job descriptor from the per-queue free stack. */
581 job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
583 * Set the job type here in order to know if the flow memory
584 * should be freed or not when get the result from dequeue.
586 job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
588 job->user_data = user_data;
/* DR completion carries the job; original user_data is restored on pull. */
589 rule_attr.user_data = job;
590 hw_acts = &table->ats[action_template_index].acts;
591 /* Construct the flow action array based on the input actions.*/
592 flow_hw_actions_construct(dev, job, hw_acts, actions,
593 rule_acts, &acts_num);
594 ret = mlx5dr_rule_create(table->matcher,
595 pattern_template_index, items,
597 &rule_attr, &flow->rule);
599 return (struct rte_flow *)flow;
600 /* Flow created fail, return the descriptor and flow memory. */
601 mlx5_ipool_free(table->flow, flow_idx);
602 priv->hw_q[queue].job_idx++;
604 rte_flow_error_set(error, rte_errno,
605 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
606 "fail to create rte flow");
611 * Enqueue HW steering flow destruction.
613 * The flow will be applied to the HW only if the postpone bit is not set or
614 * the extra push function is called.
615 * The flow destruction status should be checked from dequeue result.
618 * Pointer to the rte_eth_dev structure.
620 * The queue to destroy the flow.
622 * Pointer to the flow operation attributes.
624 * Pointer to the flow to be destroyed.
625 * @param[in] user_data
626 * Pointer to the user_data.
628 * Pointer to error structure.
631 * 0 on success, negative value otherwise and rte_errno is set.
634 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
636 const struct rte_flow_op_attr *attr,
637 struct rte_flow *flow,
639 struct rte_flow_error *error)
641 struct mlx5_priv *priv = dev->data->dev_private;
642 struct mlx5dr_rule_attr rule_attr = {
644 .user_data = user_data,
645 .burst = attr->postpone,
647 struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
648 struct mlx5_hw_q_job *job;
/* Queue full: no job descriptor available for this operation. */
651 if (unlikely(!priv->hw_q[queue].job_idx)) {
655 job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
656 job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
657 job->user_data = user_data;
659 rule_attr.user_data = job;
660 ret = mlx5dr_rule_destroy(&fh->rule, &rule_attr);
/* Failure: return the job descriptor to the free stack. */
663 priv->hw_q[queue].job_idx++;
665 return rte_flow_error_set(error, rte_errno,
666 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
667 "fail to create rte flow");
671 * Pull the enqueued flows.
673 * For flows enqueued from creation/destruction, the status should be
674 * checked from the dequeue result.
677 * Pointer to the rte_eth_dev structure.
679 * The queue to pull the result.
681 * Array to save the results.
683 * Available result with the array.
685 * Pointer to error structure.
688 * Result number on success, negative value otherwise and rte_errno is set.
691 flow_hw_pull(struct rte_eth_dev *dev,
693 struct rte_flow_op_result res[],
695 struct rte_flow_error *error)
697 struct mlx5_priv *priv = dev->data->dev_private;
698 struct mlx5_hw_q_job *job;
/* Poll completions from the DR send queue. */
701 ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
703 return rte_flow_error_set(error, rte_errno,
704 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
705 "fail to query flow queue");
706 for (i = 0; i < ret; i++) {
707 job = (struct mlx5_hw_q_job *)res[i].user_data;
708 /* Restore user data. */
709 res[i].user_data = job->user_data;
/* On destroy completion, release the flow's fate resources and memory. */
710 if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
711 if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
712 flow_hw_jump_release(dev, job->flow->jump);
713 else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
714 mlx5_hrxq_obj_release(dev, job->flow->hrxq);
715 mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
/* Recycle the job descriptor back into the free stack. */
717 priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
723 * Push the enqueued flows to HW.
725 * Force apply all the enqueued flows to the HW.
728 * Pointer to the rte_eth_dev structure.
730 * The queue to push the flow.
732 * Pointer to error structure.
735 * 0 on success, negative value otherwise and rte_errno is set.
738 flow_hw_push(struct rte_eth_dev *dev,
740 struct rte_flow_error *error)
742 struct mlx5_priv *priv = dev->data->dev_private;
/* Drain the DR send queue so all postponed rules reach the HW. */
745 ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
746 MLX5DR_SEND_QUEUE_ACTION_DRAIN);
748 rte_flow_error_set(error, rte_errno,
749 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
750 "fail to push flows");
757 * Drain the enqueued flows' completion.
760 * Pointer to the rte_eth_dev structure.
762 * The queue to pull the flow.
763 * @param[in] pending_rules
764 * The pending flow number.
766 * Pointer to error structure.
769 * 0 on success, negative value otherwise and rte_errno is set.
772 __flow_hw_pull_comp(struct rte_eth_dev *dev,
774 uint32_t pending_rules,
775 struct rte_flow_error *error)
777 struct rte_flow_op_result comp[BURST_THR];
778 int ret, i, empty_loop = 0;
/* Push first so every enqueued operation is actually in flight. */
780 flow_hw_push(dev, queue, error);
781 while (pending_rules) {
782 ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
/* Back off briefly; give up after several empty polls. */
786 rte_delay_us_sleep(20000);
787 if (++empty_loop > 5) {
788 DRV_LOG(WARNING, "No available dequeue, quit.");
793 for (i = 0; i < ret; i++) {
794 if (comp[i].status == RTE_FLOW_OP_ERROR)
795 DRV_LOG(WARNING, "Flow flush get error CQE.");
/* More completions than expected indicates forgotten dequeues. */
797 if ((uint32_t)ret > pending_rules) {
798 DRV_LOG(WARNING, "Flow flush get extra CQE.");
799 return rte_flow_error_set(error, ERANGE,
800 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
803 pending_rules -= ret;
810 * Flush created flows.
813 * Pointer to the rte_eth_dev structure.
815 * Pointer to error structure.
818 * 0 on success, negative value otherwise and rte_errno is set.
821 flow_hw_q_flow_flush(struct rte_eth_dev *dev,
822 struct rte_flow_error *error)
824 struct mlx5_priv *priv = dev->data->dev_private;
825 struct mlx5_hw_q *hw_q;
826 struct rte_flow_template_table *tbl;
827 struct rte_flow_hw *flow;
828 struct rte_flow_op_attr attr = {
831 uint32_t pending_rules = 0;
836 * Ensure to push and dequeue all the enqueued flow
837 * creation/destruction jobs in case user forgot to
838 * dequeue. Or the enqueued created flows will be
839 * leaked. The forgotten dequeues would also cause
840 * flow flush get extra CQEs as expected and pending_rules
843 for (queue = 0; queue < priv->nb_queue; queue++) {
844 hw_q = &priv->hw_q[queue];
/* Outstanding jobs = queue size minus free descriptors. */
845 if (__flow_hw_pull_comp(dev, queue, hw_q->size - hw_q->job_idx,
849 /* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
850 hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
851 LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
852 MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
853 if (flow_hw_async_flow_destroy(dev,
854 MLX5_DEFAULT_FLUSH_QUEUE,
856 (struct rte_flow *)flow,
861 /* Drain completion with queue size. */
862 if (pending_rules >= hw_q->size) {
863 if (__flow_hw_pull_comp(dev,
864 MLX5_DEFAULT_FLUSH_QUEUE,
865 pending_rules, error))
871 /* Drain left completion. */
873 __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, pending_rules,
/*
 * Create a template table.
 */
882 * The input item and action templates will be bound to the table.
883 * Flow memory will also be allocated. Matcher will be created based
884 * on the item template. Action will be translated to the dedicated
885 * DR action if possible.
888 * Pointer to the rte_eth_dev structure.
890 * Pointer to the table attributes.
891 * @param[in] item_templates
892 * Item template array to be bound to the table.
893 * @param[in] nb_item_templates
894 * Number of item template.
895 * @param[in] action_templates
896 * Action template array to be bound to the table.
897 * @param[in] nb_action_templates
898 * Number of action template.
900 * Pointer to error structure.
903 * Table on success, NULL otherwise and rte_errno is set.
905 static struct rte_flow_template_table *
906 flow_hw_table_create(struct rte_eth_dev *dev,
907 const struct rte_flow_template_table_attr *attr,
908 struct rte_flow_pattern_template *item_templates[],
909 uint8_t nb_item_templates,
910 struct rte_flow_actions_template *action_templates[],
911 uint8_t nb_action_templates,
912 struct rte_flow_error *error)
914 struct mlx5_priv *priv = dev->data->dev_private;
915 struct mlx5dr_matcher_attr matcher_attr = {0};
916 struct rte_flow_template_table *tbl = NULL;
917 struct mlx5_flow_group *grp;
918 struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
919 struct rte_flow_attr flow_attr = attr->flow_attr;
920 struct mlx5_flow_cb_ctx ctx = {
/* Indexed-pool configuration for per-flow memory of this table. */
925 struct mlx5_indexed_pool_config cfg = {
926 .size = sizeof(struct rte_flow_hw),
927 .trunk_size = 1 << 12,
928 .per_core_cache = 1 << 13,
930 .release_mem_en = !!priv->sh->config.reclaim_mode,
931 .malloc = mlx5_malloc,
933 .type = "mlx5_hw_table_flow",
935 struct mlx5_list_entry *ge;
936 uint32_t i, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
/* DR sizes matchers in powers of two. */
937 uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
940 /* HWS layer accepts only 1 item template with root table. */
941 if (!attr->flow_attr.group)
943 cfg.max_idx = nb_flows;
944 /* For tables with very limited flows, disable the cache. */
945 if (nb_flows < cfg.trunk_size) {
946 cfg.per_core_cache = 0;
947 cfg.trunk_size = nb_flows;
949 /* Check if we require too many templates. */
950 if (nb_item_templates > max_tpl ||
951 nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
955 /* Allocate the table memory. */
956 tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
959 /* Allocate flow indexed pool. */
960 tbl->flow = mlx5_ipool_create(&cfg);
963 /* Register the flow group. */
964 ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
967 grp = container_of(ge, struct mlx5_flow_group, entry);
969 /* Prepare matcher information. */
970 matcher_attr.priority = attr->flow_attr.priority;
971 matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
972 matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
973 /* Build the item template. */
974 for (i = 0; i < nb_item_templates; i++) {
/* Take a reference on each bound item template. */
977 ret = __atomic_add_fetch(&item_templates[i]->refcnt, 1,
983 mt[i] = item_templates[i]->mt;
984 tbl->its[i] = item_templates[i];
986 tbl->matcher = mlx5dr_matcher_create
987 (tbl->grp->tbl, mt, nb_item_templates, &matcher_attr);
990 tbl->nb_item_templates = nb_item_templates;
991 /* Build the action template. */
992 for (i = 0; i < nb_action_templates; i++) {
995 ret = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
1001 LIST_INIT(&tbl->ats[i].acts.act_list);
1002 err = flow_hw_actions_translate(dev, attr,
1004 action_templates[i], error);
1009 tbl->ats[i].action_template = action_templates[i];
1011 tbl->nb_action_templates = nb_action_templates;
1012 tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
1013 (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
1014 MLX5DR_TABLE_TYPE_NIC_RX);
1015 LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
/* Error path: unwind in reverse order of construction. */
1019 __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
1020 __atomic_sub_fetch(&action_templates[i]->refcnt,
1021 1, __ATOMIC_RELAXED);
1023 i = nb_item_templates;
1026 __atomic_sub_fetch(&item_templates[i]->refcnt,
1027 1, __ATOMIC_RELAXED);
1028 mlx5dr_matcher_destroy(tbl->matcher);
1033 mlx5_hlist_unregister(priv->sh->groups,
1036 mlx5_ipool_destroy(tbl->flow);
1039 rte_flow_error_set(error, err,
1040 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1041 "fail to create rte table");
1046 * Destroy flow table.
1049 * Pointer to the rte_eth_dev structure.
1051 * Pointer to the table to be destroyed.
1053 * Pointer to error structure.
1056 * 0 on success, a negative errno value otherwise and rte_errno is set.
1059 flow_hw_table_destroy(struct rte_eth_dev *dev,
1060 struct rte_flow_template_table *table,
1061 struct rte_flow_error *error)
1063 struct mlx5_priv *priv = dev->data->dev_private;
/* Refuse to destroy while flows still reference the table. */
1066 if (table->refcnt) {
1067 DRV_LOG(WARNING, "Table %p is still in using.", (void *)table);
1068 return rte_flow_error_set(error, EBUSY,
1069 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1073 LIST_REMOVE(table, next);
/* Drop the references taken on templates at table creation. */
1074 for (i = 0; i < table->nb_item_templates; i++)
1075 __atomic_sub_fetch(&table->its[i]->refcnt,
1076 1, __ATOMIC_RELAXED);
1077 for (i = 0; i < table->nb_action_templates; i++) {
1078 if (table->ats[i].acts.mark)
1079 flow_hw_rxq_flag_set(dev, false);
1080 __flow_hw_action_template_destroy(dev, &table->ats[i].acts);
1081 __atomic_sub_fetch(&table->ats[i].action_template->refcnt,
1082 1, __ATOMIC_RELAXED);
1084 mlx5dr_matcher_destroy(table->matcher);
1085 mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
1086 mlx5_ipool_destroy(table->flow);
1092 * Create flow action template.
1095 * Pointer to the rte_eth_dev structure.
1097 * Pointer to the action template attributes.
1098 * @param[in] actions
1099 * Associated actions (list terminated by the END action).
1101 * List of actions that marks which of the action's member is constant.
1103 * Pointer to error structure.
1106 * Action template pointer on success, NULL otherwise and rte_errno is set.
1108 static struct rte_flow_actions_template *
1109 flow_hw_actions_template_create(struct rte_eth_dev *dev,
1110 const struct rte_flow_actions_template_attr *attr,
1111 const struct rte_flow_action actions[],
1112 const struct rte_flow_action masks[],
1113 struct rte_flow_error *error)
1115 struct mlx5_priv *priv = dev->data->dev_private;
1116 int len, act_len, mask_len, i;
1117 struct rte_flow_actions_template *at;
/* Size pass: measure both arrays before one combined allocation. */
1119 act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
1120 NULL, 0, actions, error);
1123 len = RTE_ALIGN(act_len, 16);
1124 mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
1125 NULL, 0, masks, error);
1128 len += RTE_ALIGN(mask_len, 16);
/* Single allocation: template header followed by both action copies. */
1129 at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at), 64, rte_socket_id());
1131 rte_flow_error_set(error, ENOMEM,
1132 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1134 "cannot allocate action template");
1138 at->actions = (struct rte_flow_action *)(at + 1);
1139 act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions, len,
1143 at->masks = (struct rte_flow_action *)
1144 (((uint8_t *)at->actions) + act_len);
1145 mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
1146 len - act_len, masks, error);
1150 * mlx5 PMD hacks indirect action index directly to the action conf.
1151 * The rte_flow_conv() function copies the content from conf pointer.
1152 * Need to restore the indirect action index from action conf here.
1154 for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
1155 actions++, masks++, i++) {
1156 if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
1157 at->actions[i].conf = actions->conf;
1158 at->masks[i].conf = masks->conf;
1161 __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
1162 LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
1170 * Destroy flow action template.
1173 * Pointer to the rte_eth_dev structure.
1174 * @param[in] template
1175 * Pointer to the action template to be destroyed.
1177 * Pointer to error structure.
1180 * 0 on success, a negative errno value otherwise and rte_errno is set.
1183 flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
1184 struct rte_flow_actions_template *template,
1185 struct rte_flow_error *error __rte_unused)
1187 if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
1188 DRV_LOG(WARNING, "Action template %p is still in use.",
1190 return rte_flow_error_set(error, EBUSY,
1191 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1193 "action template in using");
1195 LIST_REMOVE(template, next);
1196 mlx5_free(template);
1201 * Create flow item template.
1204 * Pointer to the rte_eth_dev structure.
1206 * Pointer to the item template attributes.
1208 * The template item pattern.
1210 * Pointer to error structure.
1213 * Item template pointer on success, NULL otherwise and rte_errno is set.
1215 static struct rte_flow_pattern_template *
1216 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
1217 const struct rte_flow_pattern_template_attr *attr,
1218 const struct rte_flow_item items[],
1219 struct rte_flow_error *error)
1221 struct mlx5_priv *priv = dev->data->dev_private;
1222 struct rte_flow_pattern_template *it;
1224 it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
1226 rte_flow_error_set(error, ENOMEM,
1227 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1229 "cannot allocate item template");
/* Create the low-level DR match template from the item pattern. */
1233 it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
1236 rte_flow_error_set(error, rte_errno,
1237 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1239 "cannot create match template");
/* Initial self-reference; tables add further references on bind. */
1242 __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
1243 LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
1248 * Destroy flow item template.
1251 * Pointer to the rte_eth_dev structure.
1252 * @param[in] template
1253 * Pointer to the item template to be destroyed.
1255 * Pointer to error structure.
1258 * 0 on success, a negative errno value otherwise and rte_errno is set.
1261 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
1262 struct rte_flow_pattern_template *template,
1263 struct rte_flow_error *error __rte_unused)
1265 if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
1266 DRV_LOG(WARNING, "Item template %p is still in use.",
1268 return rte_flow_error_set(error, EBUSY,
1269 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1271 "item template in using");
1273 LIST_REMOVE(template, next);
1274 claim_zero(mlx5dr_match_template_destroy(template->mt));
1275 mlx5_free(template);
1280 * Get information about HWS pre-configurable resources.
1283 * Pointer to the rte_eth_dev structure.
1284 * @param[out] port_info
1285 * Pointer to port information.
1286 * @param[out] queue_info
1287 * Pointer to queue information.
1289 * Pointer to error structure.
1292 * 0 on success, a negative errno value otherwise and rte_errno is set.
1295 flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
1296 struct rte_flow_port_info *port_info __rte_unused,
1297 struct rte_flow_queue_info *queue_info __rte_unused,
1298 struct rte_flow_error *error __rte_unused)
1300 /* Nothing to be updated currently. */
1301 memset(port_info, 0, sizeof(*port_info));
1302 /* Queue size is unlimited from low-level. */
1303 queue_info->max_size = UINT32_MAX;
1308 * Create group callback.
1310 * @param[in] tool_ctx
1311 * Pointer to the hash list related context.
1313 * Pointer to the group creation context.
1316 * Group entry on success, NULL otherwise and rte_errno is set.
1318 struct mlx5_list_entry *
1319 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
1321 struct mlx5_dev_ctx_shared *sh = tool_ctx;
1322 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
1323 struct rte_eth_dev *dev = ctx->dev;
1324 struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
1325 struct mlx5_priv *priv = dev->data->dev_private;
1326 struct mlx5dr_table_attr dr_tbl_attr = {0};
1327 struct rte_flow_error *error = ctx->error;
1328 struct mlx5_flow_group *grp_data;
1329 struct mlx5dr_table *tbl = NULL;
1330 struct mlx5dr_action *jump;
1333 grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
1335 rte_flow_error_set(error, ENOMEM,
1336 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1338 "cannot allocate flow table data entry");
/* Map flow attributes to a DR table type (FDB / TX / RX). */
1341 dr_tbl_attr.level = attr->group;
1343 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
1344 else if (attr->egress)
1345 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
1347 dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
1348 tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
1351 grp_data->tbl = tbl;
1353 /* Jump action to be used by non-root tables. */
1354 jump = mlx5dr_action_create_dest_table
1356 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
1359 grp_data->jump.hws_action = jump;
1360 /* Jump action to be used by the root table. */
1361 jump = mlx5dr_action_create_dest_table
1363 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
1364 [dr_tbl_attr.type]);
1367 grp_data->jump.root_action = jump;
1369 grp_data->idx = idx;
1370 grp_data->group_id = attr->group;
1371 grp_data->type = dr_tbl_attr.type;
1372 return &grp_data->entry;
/* Error path: release whatever was created, in reverse order. */
1374 if (grp_data->jump.root_action)
1375 mlx5dr_action_destroy(grp_data->jump.root_action);
1376 if (grp_data->jump.hws_action)
1377 mlx5dr_action_destroy(grp_data->jump.hws_action);
1379 mlx5dr_table_destroy(tbl);
1381 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
1382 rte_flow_error_set(error, ENOMEM,
1383 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1385 "cannot allocate flow dr table");
1390 * Remove group callback.
1392 * @param[in] tool_ctx
1393 * Pointer to the hash list related context.
1395 * Pointer to the entry to be removed.
1398 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
1400 struct mlx5_dev_ctx_shared *sh = tool_ctx;
1401 struct mlx5_flow_group *grp_data =
1402 container_of(entry, struct mlx5_flow_group, entry);
1404 MLX5_ASSERT(entry && sh);
1405 /* To use the wrapper glue functions instead. */
1406 if (grp_data->jump.hws_action)
1407 mlx5dr_action_destroy(grp_data->jump.hws_action);
1408 if (grp_data->jump.root_action)
1409 mlx5dr_action_destroy(grp_data->jump.root_action);
1410 mlx5dr_table_destroy(grp_data->tbl);
1411 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
1415 * Match group callback.
1417 * @param[in] tool_ctx
1418 * Pointer to the hash list related context.
1420 * Pointer to the group to be matched.
1422 * Pointer to the group matching context.
1425 * 0 on matched, 1 on miss matched.
/*
 * Group hash-list match callback: returns 0 when the cached entry matches
 * the requested group id and direction, non-zero on mismatch.
 * NOTE(review): the direction operands of the boolean expression (the
 * attr->transfer/egress/ingress terms) are missing from this excerpt;
 * only the type-comparison halves are visible.
 */
1428 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
1431 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
1432 struct mlx5_flow_group *grp_data =
1433 container_of(entry, struct mlx5_flow_group, entry);
1434 struct rte_flow_attr *attr =
1435 (struct rte_flow_attr *)ctx->data;
/* Mismatch if group ids differ or table type contradicts direction. */
1437 return (grp_data->group_id != attr->group) ||
1438 ((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
1440 ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
1442 ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
1447 * Clone group entry callback.
1449 * @param[in] tool_ctx
1450 * Pointer to the hash list related context.
1452 * Pointer to the group entry to be cloned.
1454 * Pointer to the clone context (carries the error pointer).
1457 * Cloned group entry on success, NULL otherwise and rte_errno is set.
/*
 * Group hash-list clone callback: duplicate an existing group entry into
 * a freshly allocated ipool slot (per-list-instance copy).
 * NOTE(review): the allocation-failure check and its return are missing
 * from this excerpt.
 */
1459 struct mlx5_list_entry *
1460 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
1463 struct mlx5_dev_ctx_shared *sh = tool_ctx;
1464 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
1465 struct mlx5_flow_group *grp_data;
1466 struct rte_flow_error *error = ctx->error;
1469 grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
1471 rte_flow_error_set(error, ENOMEM,
1472 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1474 "cannot allocate flow table data entry");
/* Shallow copy of the original entry, then patch in the new ipool idx. */
1477 memcpy(grp_data, oentry, sizeof(*grp_data));
1478 grp_data->idx = idx;
1479 return &grp_data->entry;
1483 * Free cloned group entry callback.
1485 * @param[in] tool_ctx
1486 * Pointer to the hash list related context.
1488 * Pointer to the group to be freed.
1491 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
1493 struct mlx5_dev_ctx_shared *sh = tool_ctx;
1494 struct mlx5_flow_group *grp_data =
1495 container_of(entry, struct mlx5_flow_group, entry);
1497 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
1501 * Configure port HWS resources.
1504 * Pointer to the rte_eth_dev structure.
1505 * @param[in] port_attr
1506 * Port configuration attributes.
1507 * @param[in] nb_queue
1509 * @param[in] queue_attr
1510 * Array that holds attributes for each flow queue.
1512 * Pointer to error structure.
1515 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Configure port HWS resources: validate attributes, release any previous
 * configuration, allocate the action ipool and per-queue job LIFOs in one
 * contiguous allocation, open the mlx5dr context, and create the global
 * drop/tag actions.
 * NOTE(review): many error-handling lines (NULL checks, goto targets,
 * branch conditions) are missing from this excerpt; comments describe
 * only the visible code.
 */
1519 flow_hw_configure(struct rte_eth_dev *dev,
1520 const struct rte_flow_port_attr *port_attr,
1522 const struct rte_flow_queue_attr *queue_attr[],
1523 struct rte_flow_error *error)
1525 struct mlx5_priv *priv = dev->data->dev_private;
1526 struct mlx5dr_context *dr_ctx = NULL;
1527 struct mlx5dr_context_attr dr_ctx_attr = {0};
1528 struct mlx5_hw_q *hw_q;
1529 struct mlx5_hw_q_job *job = NULL;
1530 uint32_t mem_size, i, j;
/* Ipool configuration for per-flow action construct data. */
1531 struct mlx5_indexed_pool_config cfg = {
1532 .size = sizeof(struct rte_flow_hw),
1535 .release_mem_en = !!priv->sh->config.reclaim_mode,
1536 .malloc = mlx5_malloc,
1538 .type = "mlx5_hw_action_construct_data",
/* Reject a call without attributes or with zero queues. */
1541 if (!port_attr || !nb_queue || !queue_attr) {
1545 /* In case re-configuring, release existing context at first. */
1548 for (i = 0; i < nb_queue; i++) {
1549 hw_q = &priv->hw_q[i];
1550 /* Make sure all queues are empty. */
/* A queue is empty when its whole job LIFO is back (idx == size). */
1551 if (hw_q->size != hw_q->job_idx) {
1556 flow_hw_resource_release(dev);
1558 priv->acts_ipool = mlx5_ipool_create(&cfg);
1559 if (!priv->acts_ipool)
1561 /* Allocate the queue job descriptor LIFO. */
1562 mem_size = sizeof(priv->hw_q[0]) * nb_queue;
1563 for (i = 0; i < nb_queue; i++) {
1565 * Check if the queues' size are all the same as the
1566 * limitation from HWS layer.
1568 if (queue_attr[i]->size != queue_attr[0]->size) {
/* Per queue: pointer array + job descriptors, all in one block. */
1572 mem_size += (sizeof(struct mlx5_hw_q_job *) +
1573 sizeof(struct mlx5_hw_q_job)) *
1574 queue_attr[0]->size;
1576 priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
/* Carve the single allocation into per-queue LIFOs. */
1582 for (i = 0; i < nb_queue; i++) {
1583 priv->hw_q[i].job_idx = queue_attr[i]->size;
1584 priv->hw_q[i].size = queue_attr[i]->size;
/* Queue 0's pointer array starts right after the hw_q headers. */
1586 priv->hw_q[i].job = (struct mlx5_hw_q_job **)
1587 &priv->hw_q[nb_queue];
/* Later queues start after the previous queue's job descriptors. */
1589 priv->hw_q[i].job = (struct mlx5_hw_q_job **)
1590 &job[queue_attr[i - 1]->size];
1591 job = (struct mlx5_hw_q_job *)
1592 &priv->hw_q[i].job[queue_attr[i]->size];
1593 for (j = 0; j < queue_attr[i]->size; j++)
1594 priv->hw_q[i].job[j] = &job[j];
1596 dr_ctx_attr.pd = priv->sh->cdev->pd;
1597 dr_ctx_attr.queues = nb_queue;
1598 /* Queue size should all be the same. Take the first one. */
1599 dr_ctx_attr.queue_size = queue_attr[0]->size;
1600 dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
1601 /* rte_errno has been updated by HWS layer. */
1604 priv->dr_ctx = dr_ctx;
1605 priv->nb_queue = nb_queue;
1606 /* Add global actions. */
1607 for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1608 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1609 priv->hw_drop[i][j] = mlx5dr_action_create_dest_drop
1610 (priv->dr_ctx, mlx5_hw_act_flag[i][j]);
1611 if (!priv->hw_drop[i][j])
1614 priv->hw_tag[i] = mlx5dr_action_create_tag
1615 (priv->dr_ctx, mlx5_hw_act_flag[i][0]);
1616 if (!priv->hw_tag[i])
/* Error path: destroy whatever global actions were created so far. */
1621 for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1622 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1623 if (priv->hw_drop[i][j])
1624 mlx5dr_action_destroy(priv->hw_drop[i][j]);
1626 if (priv->hw_tag[i])
1627 mlx5dr_action_destroy(priv->hw_tag[i]);
1630 claim_zero(mlx5dr_context_close(dr_ctx));
1631 mlx5_free(priv->hw_q);
1633 if (priv->acts_ipool) {
1634 mlx5_ipool_destroy(priv->acts_ipool);
1635 priv->acts_ipool = NULL;
1637 return rte_flow_error_set(error, rte_errno,
1638 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1639 "fail to configure port");
1643 * Release HWS resources.
1646 * Pointer to the rte_eth_dev structure.
/*
 * Release all HWS resources owned by the port: template tables, pattern
 * and actions templates, the global drop/tag actions, the action ipool,
 * the queue job allocation and finally the mlx5dr context.
 * NOTE(review): the declaration of i/j and the early-return guard for an
 * unconfigured port are not visible in this excerpt.
 */
1649 flow_hw_resource_release(struct rte_eth_dev *dev)
1651 struct mlx5_priv *priv = dev->data->dev_private;
1652 struct rte_flow_template_table *tbl;
1653 struct rte_flow_pattern_template *it;
1654 struct rte_flow_actions_template *at;
/* Destroy callbacks unlink entries, so drain each list head-first. */
1659 while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
1660 tbl = LIST_FIRST(&priv->flow_hw_tbl);
1661 flow_hw_table_destroy(dev, tbl, NULL);
1663 while (!LIST_EMPTY(&priv->flow_hw_itt)) {
1664 it = LIST_FIRST(&priv->flow_hw_itt);
1665 flow_hw_pattern_template_destroy(dev, it, NULL);
1667 while (!LIST_EMPTY(&priv->flow_hw_at)) {
1668 at = LIST_FIRST(&priv->flow_hw_at);
1669 flow_hw_actions_template_destroy(dev, at, NULL);
/* Mirror of the creation loops in flow_hw_configure(). */
1671 for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
1672 for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
1673 if (priv->hw_drop[i][j])
1674 mlx5dr_action_destroy(priv->hw_drop[i][j]);
1676 if (priv->hw_tag[i])
1677 mlx5dr_action_destroy(priv->hw_tag[i]);
1679 if (priv->acts_ipool) {
1680 mlx5_ipool_destroy(priv->acts_ipool);
1681 priv->acts_ipool = NULL;
/* One allocation covered all queues; a single free releases them. */
1683 mlx5_free(priv->hw_q);
1685 claim_zero(mlx5dr_context_close(priv->dr_ctx));
/* Clear the pointer so a re-configure starts from a clean state. */
1686 priv->dr_ctx = NULL;
1690 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
1691 .info_get = flow_hw_info_get,
1692 .configure = flow_hw_configure,
1693 .pattern_template_create = flow_hw_pattern_template_create,
1694 .pattern_template_destroy = flow_hw_pattern_template_destroy,
1695 .actions_template_create = flow_hw_actions_template_create,
1696 .actions_template_destroy = flow_hw_actions_template_destroy,
1697 .template_table_create = flow_hw_table_create,
1698 .template_table_destroy = flow_hw_table_destroy,
1699 .async_flow_create = flow_hw_async_flow_create,
1700 .async_flow_destroy = flow_hw_async_flow_destroy,
1701 .pull = flow_hw_pull,
1702 .push = flow_hw_push,