/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include <mlx5_malloc.h>

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

/* The maximum number of actions supported in a flow. */
#define MLX5_HW_MAX_ACTS 16

/* Default push burst threshold. */
#define BURST_THR 32u

/* Default queue used to flush the flows. */
#define MLX5_DEFAULT_FLUSH_QUEUE 0

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;

/* DR action flags for each table type. */
static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
				[MLX5DR_TABLE_TYPE_MAX] = {
	{
		MLX5DR_ACTION_FLAG_ROOT_RX,
		MLX5DR_ACTION_FLAG_ROOT_TX,
		MLX5DR_ACTION_FLAG_ROOT_FDB,
	},
	{
		MLX5DR_ACTION_FLAG_HWS_RX,
		MLX5DR_ACTION_FLAG_HWS_TX,
		MLX5DR_ACTION_FLAG_HWS_FDB,
	},
};
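
/*
 * Illustrative indexing sketch (not part of the driver): the first
 * dimension selects root (group 0) vs. HWS (non-root group) flags and
 * the second selects the table type, matching how this file indexes
 * the array elsewhere:
 *
 *   uint32_t flags = mlx5_hw_act_flag[!!attr->group][MLX5DR_TABLE_TYPE_NIC_RX];
 */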
/**
 * Set rxq flag.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] enable
 *   Flag to enable or disable.
 */
static void
flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	if ((!priv->mark_enabled && !enable) ||
	    (priv->mark_enabled && enable))
		return;
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		rxq_ctrl->rxq.mark = enable;
	}
	priv->mark_enabled = enable;
}
/**
 * Generate the pattern item flags.
 * Will be used for shared RSS action.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   Item flags.
 */
static uint64_t
flow_hw_rss_item_flags_get(const struct rte_flow_item items[])
{
	uint64_t item_flags = 0;
	uint64_t last_item = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int item_type = items->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			last_item = MLX5_FLOW_LAYER_GENEVE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP:
			last_item = MLX5_FLOW_LAYER_GTP;
			break;
		default:
			break;
		}
		item_flags |= last_item;
	}
	return item_flags;
}
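
/*
 * Illustrative call sketch (an assumption, not from the original source):
 * for a plain outer IPv4/UDP pattern the helper above returns the outer
 * L3/L4 layer flags.
 *
 *   const struct rte_flow_item items[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   uint64_t flags = flow_hw_rss_item_flags_get(items);
 *   // flags == MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP
 */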
/**
 * Register destination table DR jump action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] dest_group
 *   The destination group ID.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Jump action on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_hw_jump_action *
flow_hw_jump_action_register(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     uint32_t dest_group,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_attr jattr = *attr;
	struct mlx5_flow_group *grp;
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &jattr,
	};
	struct mlx5_list_entry *ge;

	jattr.group = dest_group;
	ge = mlx5_hlist_register(priv->sh->flow_tbls, dest_group, &ctx);
	if (!ge)
		return NULL;
	grp = container_of(ge, struct mlx5_flow_group, entry);
	return &grp->jump;
}

/**
 * Release jump action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] jump
 *   Pointer to the jump action.
 */
static void
flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_group *grp;

	grp = container_of
		(jump, struct mlx5_flow_group, jump);
	mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
}
/**
 * Register queue/RSS action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] hws_flags
 *   DR action flags.
 * @param[in] action
 *   rte flow action.
 *
 * @return
 *   TIR action on success, NULL otherwise and rte_errno is set.
 */
static inline struct mlx5_hrxq*
flow_hw_tir_action_register(struct rte_eth_dev *dev,
			    uint32_t hws_flags,
			    const struct rte_flow_action *action)
{
	struct mlx5_flow_rss_desc rss_desc = {
		.hws_flags = hws_flags,
	};
	struct mlx5_hrxq *hrxq;

	if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		const struct rte_flow_action_queue *queue = action->conf;

		rss_desc.const_q = &queue->index;
		rss_desc.queue_num = 1;
	} else {
		const struct rte_flow_action_rss *rss = action->conf;

		rss_desc.queue_num = rss->queue_num;
		rss_desc.const_q = rss->queue;
		memcpy(rss_desc.key,
		       !rss->key ? rss_hash_default_key : rss->key,
		       MLX5_RSS_HASH_KEY_LEN);
		rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
		rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
		flow_dv_hashfields_set(0, &rss_desc, &rss_desc.hash_fields);
		flow_dv_action_rss_l34_hash_adjust(rss->types,
						   &rss_desc.hash_fields);
		if (rss->level > 1) {
			rss_desc.hash_fields |= IBV_RX_HASH_INNER;
			rss_desc.tunnel = 1;
		}
	}
	hrxq = mlx5_hrxq_get(dev, &rss_desc);
	return hrxq;
}
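
/*
 * Illustrative action conf consumed by the helper above (a sketch, not
 * from the original source): a QUEUE action carrying a single Rx queue
 * index.
 *
 *   struct rte_flow_action_queue queue_conf = { .index = 3 };
 *   struct rte_flow_action queue_action = {
 *       .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *       .conf = &queue_conf,
 *   };
 *   struct mlx5_hrxq *hrxq =
 *       flow_hw_tir_action_register(dev, hws_flags, &queue_action);
 */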
/**
 * Destroy DR actions created by action template.
 *
 * For DR actions created during the table creation's action translation,
 * the DR action needs to be destroyed when the table is destroyed.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 */
static void
__flow_hw_action_template_destroy(struct rte_eth_dev *dev,
				  struct mlx5_hw_actions *acts)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (acts->jump) {
		struct mlx5_flow_group *grp;

		grp = container_of
			(acts->jump, struct mlx5_flow_group, jump);
		mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
		acts->jump = NULL;
	}
}

/**
 * Allocate a dynamic action construct data entry.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 *
 * @return
 *   Pointer to the allocated construct data on success,
 *   NULL otherwise and rte_errno is set.
 */
static __rte_always_inline struct mlx5_action_construct_data *
__flow_hw_act_data_alloc(struct mlx5_priv *priv,
			 enum rte_flow_action_type type,
			 uint16_t action_src,
			 uint16_t action_dst)
{
	struct mlx5_action_construct_data *act_data;
	uint32_t idx = 0;

	act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
	if (!act_data)
		return NULL;
	act_data->type = type;
	act_data->action_src = action_src;
	act_data->action_dst = action_dst;
	return act_data;
}

/**
 * Append dynamic action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
__flow_hw_act_data_general_append(struct mlx5_priv *priv,
				  struct mlx5_hw_actions *acts,
				  enum rte_flow_action_type type,
				  uint16_t action_src,
				  uint16_t action_dst)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}
/**
 * Append dynamic encap action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 * @param[in] encap_src
 *   Offset of source encap raw data.
 * @param[in] encap_dst
 *   Offset of destination encap raw data.
 * @param[in] len
 *   Length of the data to be updated.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
__flow_hw_act_data_encap_append(struct mlx5_priv *priv,
				struct mlx5_hw_actions *acts,
				enum rte_flow_action_type type,
				uint16_t action_src,
				uint16_t action_dst,
				uint16_t encap_src,
				uint16_t encap_dst,
				uint16_t len)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	act_data->encap.src = encap_src;
	act_data->encap.dst = encap_dst;
	act_data->encap.len = len;
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}

/**
 * Append shared RSS action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 * @param[in] idx
 *   Shared RSS index.
 * @param[in] rss
 *   Pointer to the shared RSS info.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
__flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
				     struct mlx5_hw_actions *acts,
				     enum rte_flow_action_type type,
				     uint16_t action_src,
				     uint16_t action_dst,
				     uint32_t idx,
				     struct mlx5_shared_action_rss *rss)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	act_data->shared_rss.level = rss->origin.level;
	act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :
				     rss->origin.types;
	act_data->shared_rss.idx = idx;
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}
/**
 * Translate shared indirect action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev data structure.
 * @param[in] action
 *   Pointer to the shared indirect rte_flow action.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
flow_hw_shared_action_translate(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_hw_actions *acts,
				uint16_t action_src,
				uint16_t action_dst)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss;
	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx &
		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
		if (!shared_rss || __flow_hw_act_data_shared_rss_append
		    (priv, acts,
		     (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
		     action_src, action_dst, idx, shared_rss))
			return -1;
		break;
	default:
		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
		return -1;
	}
	return 0;
}
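
/*
 * Sketch of the indirect action handle encoding decoded above (an
 * assumption based on the shifts used in this file): the type lives in
 * the high bits and the pool index in the low bits.
 *
 *   act_idx = (type << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | idx;
 */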
/**
 * Translate encap items to encapsulation list.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 * @param[in] items
 *   Encap item pattern.
 * @param[in] items_m
 *   Encap item mask, indicating which parts are constant and which dynamic.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
flow_hw_encap_item_translate(struct rte_eth_dev *dev,
			     struct mlx5_hw_actions *acts,
			     enum rte_flow_action_type type,
			     uint16_t action_src,
			     uint16_t action_dst,
			     const struct rte_flow_item *items,
			     const struct rte_flow_item *items_m)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	size_t len, total_len = 0;
	uint32_t i = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++, items_m++, i++) {
		len = flow_dv_get_item_hdr_len(items->type);
		if ((!items_m->spec ||
		    memcmp(items_m->spec, items->spec, len)) &&
		    __flow_hw_act_data_encap_append(priv, acts, type,
						    action_src, action_dst, i,
						    total_len, len))
			return -1;
		total_len += len;
	}
	return 0;
}
/**
 * Translate rte_flow actions to DR actions.
 *
 * As the action template has already indicated the actions, translate
 * the rte_flow actions to DR actions if possible, so that the flow
 * creation stage saves the cycles otherwise spent organizing the
 * actions. Actions with only limited information available here are
 * added to a dynamic action list and handled at flow creation time.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] table_attr
 *   Pointer to the table attributes.
 * @param[in/out] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] at
 *   Action template to be translated.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_actions_translate(struct rte_eth_dev *dev,
			  const struct rte_flow_template_table_attr *table_attr,
			  struct mlx5_hw_actions *acts,
			  struct rte_flow_actions_template *at,
			  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_attr *attr = &table_attr->flow_attr;
	struct rte_flow_action *actions = at->actions;
	struct rte_flow_action *action_start = actions;
	struct rte_flow_action *masks = at->masks;
	enum mlx5dr_action_reformat_type refmt_type = 0;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
	uint16_t reformat_pos = MLX5_HW_MAX_ACTS, reformat_src = 0;
	uint8_t *encap_data = NULL;
	size_t data_size = 0;
	bool actions_end = false;
	uint32_t type, i;
	int err;

	if (attr->transfer)
		type = MLX5DR_TABLE_TYPE_FDB;
	else if (attr->egress)
		type = MLX5DR_TABLE_TYPE_NIC_TX;
	else
		type = MLX5DR_TABLE_TYPE_NIC_RX;
	for (i = 0; !actions_end; actions++, masks++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			if (!attr->group) {
				DRV_LOG(ERR, "Indirect action is not supported in root table.");
				goto err;
			}
			if (actions->conf && masks->conf) {
				if (flow_hw_shared_action_translate
				    (dev, actions, acts, actions - action_start, i))
					goto err;
			} else if (__flow_hw_act_data_general_append
				   (priv, acts, actions->type,
				    actions - action_start, i)){
				goto err;
			}
			i++;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			acts->rule_acts[i++].action =
				priv->hw_drop[!!attr->group][type];
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			acts->mark = true;
			if (masks->conf)
				acts->rule_acts[i].tag.value =
					mlx5_flow_mark_set
					(((const struct rte_flow_action_mark *)
					(masks->conf))->id);
			else if (__flow_hw_act_data_general_append(priv, acts,
				actions->type, actions - action_start, i))
				goto err;
			acts->rule_acts[i++].action =
				priv->hw_tag[!!attr->group];
			flow_hw_rxq_flag_set(dev, true);
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			if (masks->conf) {
				uint32_t jump_group =
					((const struct rte_flow_action_jump *)
					actions->conf)->group;
				acts->jump = flow_hw_jump_action_register
						(dev, attr, jump_group, error);
				if (!acts->jump)
					goto err;
				acts->rule_acts[i].action = (!!attr->group) ?
						acts->jump->hws_action :
						acts->jump->root_action;
			} else if (__flow_hw_act_data_general_append
				   (priv, acts, actions->type,
				    actions - action_start, i)){
				goto err;
			}
			i++;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			if (masks->conf) {
				acts->tir = flow_hw_tir_action_register
					(dev,
					 mlx5_hw_act_flag[!!attr->group][type],
					 actions);
				if (!acts->tir)
					goto err;
				acts->rule_acts[i].action =
					acts->tir->action;
			} else if (__flow_hw_act_data_general_append
				   (priv, acts, actions->type,
				    actions - action_start, i)) {
				goto err;
			}
			i++;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			if (masks->conf) {
				acts->tir = flow_hw_tir_action_register
					(dev,
					 mlx5_hw_act_flag[!!attr->group][type],
					 actions);
				if (!acts->tir)
					goto err;
				acts->rule_acts[i].action =
					acts->tir->action;
			} else if (__flow_hw_act_data_general_append
				   (priv, acts, actions->type,
				    actions - action_start, i)) {
				goto err;
			}
			i++;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
			MLX5_ASSERT(reformat_pos == MLX5_HW_MAX_ACTS);
			enc_item = ((const struct rte_flow_action_vxlan_encap *)
				   actions->conf)->definition;
			enc_item_m =
				((const struct rte_flow_action_vxlan_encap *)
				 masks->conf)->definition;
			reformat_pos = i++;
			reformat_src = actions - action_start;
			refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2;
			break;
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			MLX5_ASSERT(reformat_pos == MLX5_HW_MAX_ACTS);
			enc_item = ((const struct rte_flow_action_nvgre_encap *)
				   actions->conf)->definition;
			enc_item_m =
				((const struct rte_flow_action_nvgre_encap *)
				 masks->conf)->definition;
			reformat_pos = i++;
			reformat_src = actions - action_start;
			refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			MLX5_ASSERT(reformat_pos == MLX5_HW_MAX_ACTS);
			reformat_pos = i++;
			refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_TNL_L2_TO_L2;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			raw_encap_data =
				(const struct rte_flow_action_raw_encap *)
				 actions->conf;
			encap_data = raw_encap_data->data;
			data_size = raw_encap_data->size;
			if (reformat_pos != MLX5_HW_MAX_ACTS) {
				refmt_type = data_size <
				MLX5_ENCAPSULATION_DECISION_SIZE ?
				MLX5DR_ACTION_REFORMAT_TYPE_TNL_L3_TO_L2 :
				MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L3;
			} else {
				reformat_pos = i++;
				refmt_type =
				MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2;
			}
			reformat_src = actions - action_start;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			reformat_pos = i++;
			refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_TNL_L2_TO_L2;
			break;
		case RTE_FLOW_ACTION_TYPE_END:
			actions_end = true;
			break;
		default:
			break;
		}
	}
	if (reformat_pos != MLX5_HW_MAX_ACTS) {
		uint8_t buf[MLX5_ENCAP_MAX_LEN];

		if (enc_item) {
			MLX5_ASSERT(!encap_data);
			if (flow_dv_convert_encap_data
			    (enc_item, buf, &data_size, error) ||
			    flow_hw_encap_item_translate
			    (dev, acts, (action_start + reformat_src)->type,
			     reformat_src, reformat_pos,
			     enc_item, enc_item_m))
				goto err;
			encap_data = buf;
		} else if (encap_data && __flow_hw_act_data_encap_append
			   (priv, acts,
			    (action_start + reformat_src)->type,
			    reformat_src, reformat_pos, 0, 0, data_size)) {
			goto err;
		}
		acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(*acts->encap_decap) + data_size,
				    0, SOCKET_ID_ANY);
		if (!acts->encap_decap)
			goto err;
		if (data_size) {
			acts->encap_decap->data_size = data_size;
			memcpy(acts->encap_decap->data, encap_data, data_size);
		}
		acts->encap_decap->action = mlx5dr_action_create_reformat
				(priv->dr_ctx, refmt_type,
				 data_size, encap_data,
				 rte_log2_u32(table_attr->nb_flows),
				 mlx5_hw_act_flag[!!attr->group][type]);
		if (!acts->encap_decap->action)
			goto err;
		acts->rule_acts[reformat_pos].action =
				acts->encap_decap->action;
		acts->encap_decap_pos = reformat_pos;
	}
	acts->acts_num = i;
	return 0;
err:
	err = rte_errno;
	__flow_hw_action_template_destroy(dev, acts);
	return rte_flow_error_set(error, err,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "fail to create rte table");
}
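
/*
 * Illustrative template pair for the translation above (a sketch, not
 * from the original source): a fully-masked MARK id is translated once
 * here into a static DR action, while a zero mask would defer the id to
 * flow creation time.
 *
 *   struct rte_flow_action_mark mark_conf = { .id = 0x1234 };
 *   struct rte_flow_action tmpl_actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_conf },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action tmpl_masks[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_conf },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */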
/**
 * Get shared indirect action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev data structure.
 * @param[in] act_data
 *   Pointer to the recorded action construct data.
 * @param[in] item_flags
 *   The matcher item_flags used for RSS lookup.
 * @param[in] rule_act
 *   Pointer to the shared action's destination rule DR action.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
flow_hw_shared_action_get(struct rte_eth_dev *dev,
			  struct mlx5_action_construct_data *act_data,
			  const uint64_t item_flags,
			  struct mlx5dr_rule_action *rule_act)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_rss_desc rss_desc = { 0 };
	uint64_t hash_fields = 0;
	uint32_t hrxq_idx = 0;
	struct mlx5_hrxq *hrxq = NULL;
	int act_type = act_data->type;

	switch (act_type) {
	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
		rss_desc.level = act_data->shared_rss.level;
		rss_desc.types = act_data->shared_rss.types;
		flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);
		hrxq_idx = flow_dv_action_rss_hrxq_lookup
			(dev, act_data->shared_rss.idx, hash_fields);
		if (hrxq_idx)
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx);
		if (hrxq) {
			rule_act->action = hrxq->action;
			return 0;
		}
		break;
	default:
		DRV_LOG(WARNING, "Unsupported shared action type:%d",
			act_data->type);
		break;
	}
	return -1;
}

/**
 * Construct shared indirect action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev data structure.
 * @param[in] action
 *   Pointer to the shared indirect rte_flow action.
 * @param[in] table
 *   Pointer to the flow table.
 * @param[in] it_idx
 *   Item template index the action template refers to.
 * @param[in] rule_act
 *   Pointer to the shared action's destination rule DR action.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
flow_hw_shared_action_construct(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct rte_flow_template_table *table,
				const uint8_t it_idx,
				struct mlx5dr_rule_action *rule_act)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_action_construct_data act_data;
	struct mlx5_shared_action_rss *shared_rss;
	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx &
		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	uint64_t item_flags;

	memset(&act_data, 0, sizeof(act_data));
	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;
		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
		if (!shared_rss)
			return -1;
		act_data.shared_rss.idx = idx;
		act_data.shared_rss.level = shared_rss->origin.level;
		act_data.shared_rss.types = !shared_rss->origin.types ?
					    RTE_ETH_RSS_IP :
					    shared_rss->origin.types;
		item_flags = table->its[it_idx]->item_flags;
		if (flow_hw_shared_action_get
		    (dev, &act_data, item_flags, rule_act))
			return -1;
		break;
	default:
		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
		return -1;
	}
	return 0;
}
/**
 * Construct flow action array.
 *
 * For an action template that contains dynamic actions, these actions
 * need to be updated according to the rte_flow actions during flow
 * creation.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] job
 *   Pointer to job descriptor.
 * @param[in] hw_acts
 *   Pointer to translated actions from template.
 * @param[in] it_idx
 *   Item template index the action template refers to.
 * @param[in] actions
 *   Array of rte_flow actions to be checked.
 * @param[in] rule_acts
 *   Array of DR rule actions to be used during flow creation.
 * @param[in] acts_num
 *   Pointer to the real acts_num flow has.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
flow_hw_actions_construct(struct rte_eth_dev *dev,
			  struct mlx5_hw_q_job *job,
			  const struct mlx5_hw_actions *hw_acts,
			  const uint8_t it_idx,
			  const struct rte_flow_action actions[],
			  struct mlx5dr_rule_action *rule_acts,
			  uint32_t *acts_num)
{
	struct rte_flow_template_table *table = job->flow->table;
	struct mlx5_action_construct_data *act_data;
	const struct rte_flow_action *action;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	const struct rte_flow_item *enc_item = NULL;
	uint8_t *buf = job->encap_data;
	struct rte_flow_attr attr = {
		.ingress = 1,
	};
	uint32_t ft_flag;

	memcpy(rule_acts, hw_acts->rule_acts,
	       sizeof(*rule_acts) * hw_acts->acts_num);
	*acts_num = hw_acts->acts_num;
	if (LIST_EMPTY(&hw_acts->act_list))
		return 0;
	attr.group = table->grp->group_id;
	ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
	if (table->type == MLX5DR_TABLE_TYPE_FDB) {
		attr.transfer = 1;
		attr.ingress = 1;
	} else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
		attr.egress = 1;
		attr.ingress = 0;
	} else {
		attr.ingress = 1;
	}
	if (hw_acts->encap_decap && hw_acts->encap_decap->data_size)
		memcpy(buf, hw_acts->encap_decap->data,
		       hw_acts->encap_decap->data_size);
	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
		uint32_t jump_group;
		uint32_t tag;
		uint64_t item_flags;
		struct mlx5_hw_jump_action *jump;
		struct mlx5_hrxq *hrxq;

		action = &actions[act_data->action_src];
		MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||
			    (int)action->type == act_data->type);
		switch (act_data->type) {
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			if (flow_hw_shared_action_construct
			    (dev, action, table, it_idx,
			     &rule_acts[act_data->action_dst]))
				return -1;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			tag = mlx5_flow_mark_set
			      (((const struct rte_flow_action_mark *)
			      (action->conf))->id);
			rule_acts[act_data->action_dst].tag.value = tag;
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			jump_group = ((const struct rte_flow_action_jump *)
				      action->conf)->group;
			jump = flow_hw_jump_action_register
				(dev, &attr, jump_group, NULL);
			if (!jump)
				return -1;
			rule_acts[act_data->action_dst].action =
			(!!attr.group) ? jump->hws_action : jump->root_action;
			job->flow->jump = jump;
			job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			hrxq = flow_hw_tir_action_register(dev,
					ft_flag,
					action);
			if (!hrxq)
				return -1;
			rule_acts[act_data->action_dst].action = hrxq->action;
			job->flow->hrxq = hrxq;
			job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
			item_flags = table->its[it_idx]->item_flags;
			if (flow_hw_shared_action_get
			    (dev, act_data, item_flags,
			     &rule_acts[act_data->action_dst]))
				return -1;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
			enc_item = ((const struct rte_flow_action_vxlan_encap *)
				   action->conf)->definition;
			rte_memcpy((void *)&buf[act_data->encap.dst],
				   enc_item[act_data->encap.src].spec,
				   act_data->encap.len);
			break;
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			enc_item = ((const struct rte_flow_action_nvgre_encap *)
				   action->conf)->definition;
			rte_memcpy((void *)&buf[act_data->encap.dst],
				   enc_item[act_data->encap.src].spec,
				   act_data->encap.len);
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			raw_encap_data =
				(const struct rte_flow_action_raw_encap *)
				 action->conf;
			rte_memcpy((void *)&buf[act_data->encap.dst],
				   raw_encap_data->data, act_data->encap.len);
			MLX5_ASSERT(raw_encap_data->size ==
				    act_data->encap.len);
			break;
		default:
			break;
		}
	}
	if (hw_acts->encap_decap) {
		rule_acts[hw_acts->encap_decap_pos].reformat.offset =
				job->flow->idx - 1;
		rule_acts[hw_acts->encap_decap_pos].reformat.data = buf;
	}
	return 0;
}
/**
 * Enqueue HW steering flow creation.
 *
 * The flow will be applied to the HW only if the postpone bit is not set or
 * the extra push function is called.
 * The flow creation status should be checked from the dequeue result.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to create the flow.
 * @param[in] attr
 *   Pointer to the flow operation attributes.
 * @param[in] items
 *   Items with flow spec value.
 * @param[in] pattern_template_index
 *   The item pattern flow follows from the table.
 * @param[in] actions
 *   Action with flow spec value.
 * @param[in] action_template_index
 *   The action pattern flow follows from the table.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Flow pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow *
flow_hw_async_flow_create(struct rte_eth_dev *dev,
			  uint32_t queue,
			  const struct rte_flow_op_attr *attr,
			  struct rte_flow_template_table *table,
			  const struct rte_flow_item items[],
			  uint8_t pattern_template_index,
			  const struct rte_flow_action actions[],
			  uint8_t action_template_index,
			  void *user_data,
			  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_rule_attr rule_attr = {
		.queue_id = queue,
		.user_data = user_data,
		.burst = attr->postpone,
	};
	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
	struct mlx5_hw_actions *hw_acts;
	struct rte_flow_hw *flow;
	struct mlx5_hw_q_job *job;
	uint32_t acts_num, flow_idx;
	int ret;

	if (unlikely(!priv->hw_q[queue].job_idx)) {
		rte_errno = ENOMEM;
		goto error;
	}
	flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
	if (!flow)
		goto error;
	/*
	 * Set the table here in order to know the destination table
	 * when freeing the flow afterwards.
	 */
	flow->table = table;
	flow->idx = flow_idx;
	job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
	/*
	 * Set the job type here in order to know if the flow memory
	 * should be freed or not when getting the result from the dequeue.
	 */
	job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
	job->flow = flow;
	job->user_data = user_data;
	rule_attr.user_data = job;
	hw_acts = &table->ats[action_template_index].acts;
	/* Construct the flow action array based on the input actions. */
	flow_hw_actions_construct(dev, job, hw_acts, pattern_template_index,
				  actions, rule_acts, &acts_num);
	ret = mlx5dr_rule_create(table->matcher,
				 pattern_template_index, items,
				 rule_acts, acts_num,
				 &rule_attr, &flow->rule);
	if (likely(!ret))
		return (struct rte_flow *)flow;
	/* Flow creation failed; return the descriptor and flow memory. */
	mlx5_ipool_free(table->flow, flow_idx);
	priv->hw_q[queue].job_idx++;
error:
	rte_flow_error_set(error, rte_errno,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "fail to create rte flow");
	return NULL;
}
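
/*
 * Application-side sketch of the async API this callback backs (public
 * rte_flow names, error handling elided; an illustration, not part of
 * the driver):
 *
 *   struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *   struct rte_flow *f = rte_flow_async_create(port_id, queue, &op_attr,
 *                                              table, items, 0, actions, 0,
 *                                              user_data, &err);
 *   rte_flow_push(port_id, queue, &err);
 *   struct rte_flow_op_result res[4];
 *   int n = rte_flow_pull(port_id, queue, res, 4, &err);
 */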
/**
 * Enqueue HW steering flow destruction.
 *
 * The flow will be applied to the HW only if the postpone bit is not set or
 * the extra push function is called.
 * The flow destruction status should be checked from the dequeue result.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to destroy the flow.
 * @param[in] attr
 *   Pointer to the flow operation attributes.
 * @param[in] flow
 *   Pointer to the flow to be destroyed.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
			   uint32_t queue,
			   const struct rte_flow_op_attr *attr,
			   struct rte_flow *flow,
			   void *user_data,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_rule_attr rule_attr = {
		.queue_id = queue,
		.user_data = user_data,
		.burst = attr->postpone,
	};
	struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
	struct mlx5_hw_q_job *job;
	int ret;

	if (unlikely(!priv->hw_q[queue].job_idx)) {
		rte_errno = ENOMEM;
		goto error;
	}
	job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
	job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
	job->user_data = user_data;
	job->flow = fh;
	rule_attr.user_data = job;
	ret = mlx5dr_rule_destroy(&fh->rule, &rule_attr);
	if (likely(!ret))
		return 0;
	priv->hw_q[queue].job_idx++;
error:
	return rte_flow_error_set(error, rte_errno,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "fail to destroy rte flow");
}
/**
 * Pull the enqueued flows.
 *
 * For flows enqueued from creation/destruction, the status should be
 * checked from the dequeue result.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to pull the result.
 * @param[in/out] res
 *   Array to save the results.
 * @param[in] n_res
 *   Available result slots in the array.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Result number on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_pull(struct rte_eth_dev *dev,
	     uint32_t queue,
	     struct rte_flow_op_result res[],
	     uint16_t n_res,
	     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hw_q_job *job;
	int ret, i;

	ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
	if (ret < 0)
		return rte_flow_error_set(error, rte_errno,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"fail to query flow queue");
	for (i = 0; i < ret; i++) {
		job = (struct mlx5_hw_q_job *)res[i].user_data;
		/* Restore user data. */
		res[i].user_data = job->user_data;
		if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
			if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
				flow_hw_jump_release(dev, job->flow->jump);
			else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
				mlx5_hrxq_obj_release(dev, job->flow->hrxq);
			mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
		}
		priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
	}
	return ret;
}
/**
 * Push the enqueued flows to HW.
 *
 * Force apply all the enqueued flows to the HW.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to push the flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_push(struct rte_eth_dev *dev,
	     uint32_t queue,
	     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
				       MLX5DR_SEND_QUEUE_ACTION_DRAIN);
	if (ret) {
		rte_flow_error_set(error, rte_errno,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "fail to push flows");
		return ret;
	}
	return 0;
}
/**
 * Drain the enqueued flows' completion.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to pull the flow.
 * @param[in] pending_rules
 *   The pending flow number.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
__flow_hw_pull_comp(struct rte_eth_dev *dev,
		    uint32_t queue,
		    uint32_t pending_rules,
		    struct rte_flow_error *error)
{
	struct rte_flow_op_result comp[BURST_THR];
	int ret, i, empty_loop = 0;

	flow_hw_push(dev, queue, error);
	while (pending_rules) {
		ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
		if (ret < 0)
			return -1;
		if (!ret) {
			rte_delay_us_sleep(20000);
			if (++empty_loop > 5) {
				DRV_LOG(WARNING, "No available dequeue, quit.");
				break;
			}
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (comp[i].status == RTE_FLOW_OP_ERROR)
				DRV_LOG(WARNING, "Flow flush got an error CQE.");
		}
		if ((uint32_t)ret > pending_rules) {
			DRV_LOG(WARNING, "Flow flush got extra CQEs.");
			return rte_flow_error_set(error, ERANGE,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"get extra CQEs");
		}
		pending_rules -= ret;
		empty_loop = 0;
	}
	return 0;
}
/**
 * Flush created flows.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
int
flow_hw_q_flow_flush(struct rte_eth_dev *dev,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hw_q *hw_q;
	struct rte_flow_template_table *tbl;
	struct rte_flow_hw *flow;
	struct rte_flow_op_attr attr = {
		.postpone = 0,
	};
	uint32_t pending_rules = 0;
	uint32_t queue;
	uint32_t fidx;

	/*
	 * Ensure to push and dequeue all the enqueued flow
	 * creation/destruction jobs in case the user forgot to
	 * dequeue. Otherwise the enqueued created flows will be
	 * leaked. The forgotten dequeues would also cause the
	 * flow flush to get extra CQEs unexpectedly and make
	 * pending_rules inconsistent.
	 */
	for (queue = 0; queue < priv->nb_queue; queue++) {
		hw_q = &priv->hw_q[queue];
		if (__flow_hw_pull_comp(dev, queue, hw_q->size - hw_q->job_idx,
					error))
			return -1;
	}
	/* Flush flows per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
	hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
	LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
		MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
			if (flow_hw_async_flow_destroy(dev,
						MLX5_DEFAULT_FLUSH_QUEUE,
						&attr,
						(struct rte_flow *)flow,
						NULL,
						error))
				return -1;
			pending_rules++;
			/* Drain completion with queue size. */
			if (pending_rules >= hw_q->size) {
				if (__flow_hw_pull_comp(dev,
						MLX5_DEFAULT_FLUSH_QUEUE,
						pending_rules, error))
					return -1;
				pending_rules = 0;
			}
		}
	}
	/* Drain left completion. */
	if (pending_rules &&
	    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, pending_rules,
				error))
		return -1;
	return 0;
}
/**
 * Create flow table.
 *
 * The input item and action templates will be bound to the table.
 * Flow memory will also be allocated. The matcher will be created
 * based on the item template. Actions will be translated to dedicated
 * DR actions if possible.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the table attributes.
 * @param[in] item_templates
 *   Item template array to be bound to the table.
 * @param[in] nb_item_templates
 *   Number of item templates.
 * @param[in] action_templates
 *   Action template array to be bound to the table.
 * @param[in] nb_action_templates
 *   Number of action templates.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Table on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_template_table *
flow_hw_table_create(struct rte_eth_dev *dev,
		     const struct rte_flow_template_table_attr *attr,
		     struct rte_flow_pattern_template *item_templates[],
		     uint8_t nb_item_templates,
		     struct rte_flow_actions_template *action_templates[],
		     uint8_t nb_action_templates,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_matcher_attr matcher_attr = {0};
	struct rte_flow_template_table *tbl = NULL;
	struct mlx5_flow_group *grp;
	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
	struct rte_flow_attr flow_attr = attr->flow_attr;
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &flow_attr,
	};
	struct mlx5_indexed_pool_config cfg = {
		.size = sizeof(struct rte_flow_hw),
		.trunk_size = 1 << 12,
		.per_core_cache = 1 << 13,
		.need_lock = 1,
		.release_mem_en = !!priv->sh->config.reclaim_mode,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hw_table_flow",
	};
	struct mlx5_list_entry *ge;
	uint32_t i, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
	uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
	int err;

	/* HWS layer accepts only 1 item template with root table. */
	if (!attr->flow_attr.group)
		max_tpl = 1;
	cfg.max_idx = nb_flows;
	/* For tables with very limited flows, disable cache. */
	if (nb_flows < cfg.trunk_size) {
		cfg.per_core_cache = 0;
		cfg.trunk_size = nb_flows;
	}
	/* Check if too many templates are requested. */
	if (nb_item_templates > max_tpl ||
	    nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
		rte_errno = EINVAL;
		goto error;
	}
	/* Allocate the table memory. */
	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
	if (!tbl)
		goto error;
	/* Allocate flow indexed pool. */
	tbl->flow = mlx5_ipool_create(&cfg);
	if (!tbl->flow)
		goto error;
	/* Register the flow group. */
	ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
	if (!ge)
		goto error;
	grp = container_of(ge, struct mlx5_flow_group, entry);
	tbl->grp = grp;
	/* Prepare matcher information. */
	matcher_attr.priority = attr->flow_attr.priority;
	matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
	/* Build the item template. */
	for (i = 0; i < nb_item_templates; i++) {
		uint32_t ret;

		ret = __atomic_add_fetch(&item_templates[i]->refcnt, 1,
					 __ATOMIC_RELAXED);
		if (ret <= 1) {
			rte_errno = EINVAL;
			goto it_error;
		}
		mt[i] = item_templates[i]->mt;
		tbl->its[i] = item_templates[i];
	}
	tbl->matcher = mlx5dr_matcher_create
		(tbl->grp->tbl, mt, nb_item_templates, &matcher_attr);
	if (!tbl->matcher)
		goto it_error;
	tbl->nb_item_templates = nb_item_templates;
	/* Build the action template. */
	for (i = 0; i < nb_action_templates; i++) {
		uint32_t ret;

		ret = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
					 __ATOMIC_RELAXED);
		if (ret <= 1) {
			rte_errno = EINVAL;
			goto at_error;
		}
		LIST_INIT(&tbl->ats[i].acts.act_list);
		err = flow_hw_actions_translate(dev, attr,
						&tbl->ats[i].acts,
						action_templates[i], error);
		if (err) {
			i++;
			goto at_error;
		}
		tbl->ats[i].action_template = action_templates[i];
	}
	tbl->nb_action_templates = nb_action_templates;
	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
		    MLX5DR_TABLE_TYPE_NIC_RX);
	LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
	return tbl;
at_error:
	while (i--) {
		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
		__atomic_sub_fetch(&action_templates[i]->refcnt,
				   1, __ATOMIC_RELAXED);
	}
	i = nb_item_templates;
it_error:
	while (i--)
		__atomic_sub_fetch(&item_templates[i]->refcnt,
				   1, __ATOMIC_RELAXED);
	mlx5dr_matcher_destroy(tbl->matcher);
error:
	err = rte_errno;
	if (tbl) {
		if (tbl->grp)
			mlx5_hlist_unregister(priv->sh->groups,
					      &tbl->grp->entry);
		if (tbl->flow)
			mlx5_ipool_destroy(tbl->flow);
		mlx5_free(tbl);
	}
	rte_flow_error_set(error, err,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "fail to create rte table");
	return NULL;
}
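
/*
 * Application-side sketch of assembling a template table via the public
 * API this callback backs (an illustration, not part of the driver; the
 * it/at template handles are hypothetical):
 *
 *   struct rte_flow_template_table_attr tbl_attr = {
 *       .flow_attr = { .group = 1, .ingress = 1 },
 *       .nb_flows = 1024,
 *   };
 *   struct rte_flow_template_table *tbl =
 *       rte_flow_template_table_create(port_id, &tbl_attr,
 *                                      &it, 1, &at, 1, &err);
 */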
/**
 * Destroy flow table.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] table
 *   Pointer to the table to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_table_destroy(struct rte_eth_dev *dev,
		      struct rte_flow_template_table *table,
		      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int i;

	if (table->refcnt) {
		DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
		return rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "table is in use");
	}
	LIST_REMOVE(table, next);
	for (i = 0; i < table->nb_item_templates; i++)
		__atomic_sub_fetch(&table->its[i]->refcnt,
				   1, __ATOMIC_RELAXED);
	for (i = 0; i < table->nb_action_templates; i++) {
		if (table->ats[i].acts.mark)
			flow_hw_rxq_flag_set(dev, false);
		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
		__atomic_sub_fetch(&table->ats[i].action_template->refcnt,
				   1, __ATOMIC_RELAXED);
	}
	mlx5dr_matcher_destroy(table->matcher);
	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
	mlx5_ipool_destroy(table->flow);
	mlx5_free(table);
	return 0;
}
/**
 * Create flow action template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the action template attributes.
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] masks
 *   List of actions that marks which of the action's member is constant.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Action template pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_actions_template *
flow_hw_actions_template_create(struct rte_eth_dev *dev,
			const struct rte_flow_actions_template_attr *attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int len, act_len, mask_len, i;
	struct rte_flow_actions_template *at;

	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
				NULL, 0, actions, error);
	if (act_len <= 0)
		return NULL;
	len = RTE_ALIGN(act_len, 16);
	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
				 NULL, 0, masks, error);
	if (mask_len <= 0)
		return NULL;
	len += RTE_ALIGN(mask_len, 16);
	at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at), 64, rte_socket_id());
	if (!at) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate action template");
		return NULL;
	}
	at->attr = *attr;
	at->actions = (struct rte_flow_action *)(at + 1);
	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions, len,
				actions, error);
	if (act_len <= 0)
		goto error;
	at->masks = (struct rte_flow_action *)
		    (((uint8_t *)at->actions) + act_len);
	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
				 len - act_len, masks, error);
	if (mask_len <= 0)
		goto error;
	/*
	 * The mlx5 PMD hacks the indirect action index directly into the
	 * action conf pointer. The rte_flow_conv() function copies the
	 * content that conf points to, so the indirect action index must
	 * be restored from the original action conf here.
	 */
	for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
	     actions++, masks++, i++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			at->actions[i].conf = actions->conf;
			at->masks[i].conf = masks->conf;
		}
	}
	__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
	return at;
error:
	mlx5_free(at);
	return NULL;
}
/**
 * Destroy flow action template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the action template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
				 struct rte_flow_actions_template *template,
				 struct rte_flow_error *error __rte_unused)
{
	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
		DRV_LOG(WARNING, "Action template %p is still in use.",
			(void *)template);
		return rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "action template is in use");
	}
	LIST_REMOVE(template, next);
	mlx5_free(template);
	return 0;
}
/**
 * Create flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the item template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Item template pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_pattern_template *
flow_hw_pattern_template_create(struct rte_eth_dev *dev,
			     const struct rte_flow_pattern_template_attr *attr,
			     const struct rte_flow_item items[],
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_pattern_template *it;

	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
	if (!it) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate item template");
		return NULL;
	}
	it->attr = *attr;
	it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
	if (!it->mt) {
		mlx5_free(it);
		rte_flow_error_set(error, rte_errno,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create match template");
		return NULL;
	}
	it->item_flags = flow_hw_rss_item_flags_get(items);
	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
	return it;
}
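
/*
 * Illustrative public-API call backed by the callback above (a sketch,
 * not from the original source; attribute fields are assumptions):
 *
 *   struct rte_flow_pattern_template_attr pt_attr = {
 *       .relaxed_matching = 1,
 *   };
 *   struct rte_flow_pattern_template *pt =
 *       rte_flow_pattern_template_create(port_id, &pt_attr, items, &err);
 */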
/**
 * Destroy flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the item template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
				 struct rte_flow_pattern_template *template,
				 struct rte_flow_error *error __rte_unused)
{
	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
		DRV_LOG(WARNING, "Item template %p is still in use.",
			(void *)template);
		return rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "item template is in use");
	}
	LIST_REMOVE(template, next);
	claim_zero(mlx5dr_match_template_destroy(template->mt));
	mlx5_free(template);
	return 0;
}

/**
 * Get information about HWS pre-configurable resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] port_info
 *   Pointer to port information.
 * @param[out] queue_info
 *   Pointer to queue information.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
		 struct rte_flow_port_info *port_info __rte_unused,
		 struct rte_flow_queue_info *queue_info __rte_unused,
		 struct rte_flow_error *error __rte_unused)
{
	/* Nothing to be updated currently. */
	memset(port_info, 0, sizeof(*port_info));
	/* Queue size is unlimited at the low level. */
	queue_info->max_size = UINT32_MAX;
	return 0;
}
/**
 * Create group callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] cb_ctx
 *   Pointer to the group creation context.
 *
 * @return
 *   Group entry on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_list_entry *
flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_table_attr dr_tbl_attr = {0};
	struct rte_flow_error *error = ctx->error;
	struct mlx5_flow_group *grp_data;
	struct mlx5dr_table *tbl = NULL;
	struct mlx5dr_action *jump;
	uint32_t idx = 0;

	grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
	if (!grp_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	dr_tbl_attr.level = attr->group;
	if (attr->transfer)
		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
	else if (attr->egress)
		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
	else
		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
	tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
	if (!tbl)
		goto error;
	grp_data->tbl = tbl;
	if (attr->group) {
		/* Jump action to be used by non-root tables. */
		jump = mlx5dr_action_create_dest_table
			(priv->dr_ctx, tbl,
			 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
		if (!jump)
			goto error;
		grp_data->jump.hws_action = jump;
		/* Jump action to be used by the root table. */
		jump = mlx5dr_action_create_dest_table
			(priv->dr_ctx, tbl,
			 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
					 [dr_tbl_attr.type]);
		if (!jump)
			goto error;
		grp_data->jump.root_action = jump;
	}
	grp_data->idx = idx;
	grp_data->group_id = attr->group;
	grp_data->type = dr_tbl_attr.type;
	return &grp_data->entry;
error:
	if (grp_data->jump.root_action)
		mlx5dr_action_destroy(grp_data->jump.root_action);
	if (grp_data->jump.hws_action)
		mlx5dr_action_destroy(grp_data->jump.hws_action);
	if (tbl)
		mlx5dr_table_destroy(tbl);
	if (idx)
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
	rte_flow_error_set(error, ENOMEM,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "cannot allocate flow dr table");
	return NULL;
}
/**
 * Remove group callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] entry
 *   Pointer to the entry to be removed.
 */
void
flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_group *grp_data =
		    container_of(entry, struct mlx5_flow_group, entry);

	MLX5_ASSERT(entry && sh);
	/* To use the wrapper glue functions instead. */
	if (grp_data->jump.hws_action)
		mlx5dr_action_destroy(grp_data->jump.hws_action);
	if (grp_data->jump.root_action)
		mlx5dr_action_destroy(grp_data->jump.root_action);
	mlx5dr_table_destroy(grp_data->tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
}

/**
 * Match group callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] entry
 *   Pointer to the group to be matched.
 * @param[in] cb_ctx
 *   Pointer to the group matching context.
 *
 * @return
 *   0 on match, 1 on mismatch.
 */
int
flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
		     void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_group *grp_data =
		container_of(entry, struct mlx5_flow_group, entry);
	struct rte_flow_attr *attr =
			(struct rte_flow_attr *)ctx->data;

	return (grp_data->group_id != attr->group) ||
		((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
		attr->transfer) ||
		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
		attr->egress) ||
		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
		attr->ingress);
}

/**
 * Clone group entry callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] oentry
 *   Pointer to the group entry to be cloned.
 * @param[in] cb_ctx
 *   Pointer to the group creation context.
 *
 * @return
 *   Group entry on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_list_entry *
flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
		     void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_group *grp_data;
	struct rte_flow_error *error = ctx->error;
	uint32_t idx = 0;

	grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
	if (!grp_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	memcpy(grp_data, oentry, sizeof(*grp_data));
	grp_data->idx = idx;
	return &grp_data->entry;
}

/**
 * Free cloned group entry callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] entry
 *   Pointer to the group entry to be freed.
 */
void
flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_group *grp_data =
		    container_of(entry, struct mlx5_flow_group, entry);

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
}
/**
 * Configure port HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] port_attr
 *   Port configuration attributes.
 * @param[in] nb_queue
 *   Number of queues.
 * @param[in] queue_attr
 *   Array that holds attributes for each flow queue.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_configure(struct rte_eth_dev *dev,
		  const struct rte_flow_port_attr *port_attr,
		  uint16_t nb_queue,
		  const struct rte_flow_queue_attr *queue_attr[],
		  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_context *dr_ctx = NULL;
	struct mlx5dr_context_attr dr_ctx_attr = {0};
	struct mlx5_hw_q *hw_q;
	struct mlx5_hw_q_job *job = NULL;
	uint32_t mem_size, i, j;
	struct mlx5_indexed_pool_config cfg = {
		.size = sizeof(struct mlx5_action_construct_data),
		.need_lock = 1,
		.release_mem_en = !!priv->sh->config.reclaim_mode,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hw_action_construct_data",
	};

	if (!port_attr || !nb_queue || !queue_attr) {
		rte_errno = EINVAL;
		goto err;
	}
	/* In case of reconfiguration, release the existing context first. */
	if (priv->dr_ctx) {
		for (i = 0; i < nb_queue; i++) {
			hw_q = &priv->hw_q[i];
			/* Make sure all queues are empty. */
			if (hw_q->size != hw_q->job_idx) {
				rte_errno = EBUSY;
				goto err;
			}
		}
		flow_hw_resource_release(dev);
	}
	priv->acts_ipool = mlx5_ipool_create(&cfg);
	if (!priv->acts_ipool)
		goto err;
	/* Allocate the queue job descriptor LIFO. */
	mem_size = sizeof(priv->hw_q[0]) * nb_queue;
	for (i = 0; i < nb_queue; i++) {
		/*
		 * Check that all queue sizes are the same; this is a
		 * limitation of the HWS layer.
		 */
		if (queue_attr[i]->size != queue_attr[0]->size) {
			rte_errno = EINVAL;
			goto err;
		}
		mem_size += (sizeof(struct mlx5_hw_q_job *) +
			    sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
			    sizeof(struct mlx5_hw_q_job)) *
			    queue_attr[0]->size;
	}
	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
				 64, SOCKET_ID_ANY);
	if (!priv->hw_q) {
		rte_errno = ENOMEM;
		goto err;
	}
	for (i = 0; i < nb_queue; i++) {
		uint8_t *encap = NULL;

		priv->hw_q[i].job_idx = queue_attr[i]->size;
		priv->hw_q[i].size = queue_attr[i]->size;
		if (i == 0)
			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
					    &priv->hw_q[nb_queue];
		else
			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
					    &job[queue_attr[i - 1]->size];
		job = (struct mlx5_hw_q_job *)
		      &priv->hw_q[i].job[queue_attr[i]->size];
		encap = (uint8_t *)&job[queue_attr[i]->size];
		for (j = 0; j < queue_attr[i]->size; j++) {
			job[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN];
			priv->hw_q[i].job[j] = &job[j];
		}
	}
	dr_ctx_attr.pd = priv->sh->cdev->pd;
	dr_ctx_attr.queues = nb_queue;
	/* Queue sizes should all be the same. Take the first one. */
	dr_ctx_attr.queue_size = queue_attr[0]->size;
	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
	/* rte_errno has been updated by the HWS layer. */
	if (!dr_ctx)
		goto err;
	priv->dr_ctx = dr_ctx;
	priv->nb_queue = nb_queue;
	/* Add global actions. */
	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
			priv->hw_drop[i][j] = mlx5dr_action_create_dest_drop
				(priv->dr_ctx, mlx5_hw_act_flag[i][j]);
			if (!priv->hw_drop[i][j])
				goto err;
		}
		priv->hw_tag[i] = mlx5dr_action_create_tag
			(priv->dr_ctx, mlx5_hw_act_flag[i][0]);
		if (!priv->hw_tag[i])
			goto err;
	}
	return 0;
err:
	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
			if (priv->hw_drop[i][j])
				mlx5dr_action_destroy(priv->hw_drop[i][j]);
		}
		if (priv->hw_tag[i])
			mlx5dr_action_destroy(priv->hw_tag[i]);
	}
	if (dr_ctx)
		claim_zero(mlx5dr_context_close(dr_ctx));
	mlx5_free(priv->hw_q);
	priv->hw_q = NULL;
	if (priv->acts_ipool) {
		mlx5_ipool_destroy(priv->acts_ipool);
		priv->acts_ipool = NULL;
	}
	return rte_flow_error_set(error, rte_errno,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "fail to configure port");
}
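
/*
 * Application-side sketch of the configuration entry point above (public
 * rte_flow names; an illustration, not part of the driver):
 *
 *   struct rte_flow_port_attr pa = { .nb_counters = 0 };
 *   struct rte_flow_queue_attr qa = { .size = 64 };
 *   const struct rte_flow_queue_attr *qas[] = { &qa, &qa };
 *   rte_flow_configure(port_id, &pa, 2, qas, &err);
 */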
/**
 * Release HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
void
flow_hw_resource_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_template_table *tbl;
	struct rte_flow_pattern_template *it;
	struct rte_flow_actions_template *at;
	int i, j;

	if (!priv->dr_ctx)
		return;
	while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
		tbl = LIST_FIRST(&priv->flow_hw_tbl);
		flow_hw_table_destroy(dev, tbl, NULL);
	}
	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
		it = LIST_FIRST(&priv->flow_hw_itt);
		flow_hw_pattern_template_destroy(dev, it, NULL);
	}
	while (!LIST_EMPTY(&priv->flow_hw_at)) {
		at = LIST_FIRST(&priv->flow_hw_at);
		flow_hw_actions_template_destroy(dev, at, NULL);
	}
	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
			if (priv->hw_drop[i][j])
				mlx5dr_action_destroy(priv->hw_drop[i][j]);
		}
		if (priv->hw_tag[i])
			mlx5dr_action_destroy(priv->hw_tag[i]);
	}
	if (priv->acts_ipool) {
		mlx5_ipool_destroy(priv->acts_ipool);
		priv->acts_ipool = NULL;
	}
	mlx5_free(priv->hw_q);
	priv->hw_q = NULL;
	claim_zero(mlx5dr_context_close(priv->dr_ctx));
	priv->dr_ctx = NULL;
	priv->nb_queue = 0;
}

/**
 * Create shared action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   Which queue to be used.
 * @param[in] attr
 *   Operation attribute.
 * @param[in] conf
 *   Indirect action configuration.
 * @param[in] action
 *   rte_flow action detail.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Action handle on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_action_handle *
flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
			     const struct rte_flow_op_attr *attr,
			     const struct rte_flow_indir_action_conf *conf,
			     const struct rte_flow_action *action,
			     void *user_data,
			     struct rte_flow_error *error)
{
	RTE_SET_USED(queue);
	RTE_SET_USED(attr);
	RTE_SET_USED(user_data);
	return flow_dv_action_create(dev, conf, action, error);
}

/**
 * Update shared action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   Which queue to be used.
 * @param[in] attr
 *   Operation attribute.
 * @param[in] handle
 *   Action handle to be updated.
 * @param[in] update
 *   Update value.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
			     const struct rte_flow_op_attr *attr,
			     struct rte_flow_action_handle *handle,
			     const void *update,
			     void *user_data,
			     struct rte_flow_error *error)
{
	RTE_SET_USED(queue);
	RTE_SET_USED(attr);
	RTE_SET_USED(user_data);
	return flow_dv_action_update(dev, handle, update, error);
}

/**
 * Destroy shared action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   Which queue to be used.
 * @param[in] attr
 *   Operation attribute.
 * @param[in] handle
 *   Action handle to be destroyed.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
			      const struct rte_flow_op_attr *attr,
			      struct rte_flow_action_handle *handle,
			      void *user_data,
			      struct rte_flow_error *error)
{
	RTE_SET_USED(queue);
	RTE_SET_USED(attr);
	RTE_SET_USED(user_data);
	return flow_dv_action_destroy(dev, handle, error);
}

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
	.info_get = flow_hw_info_get,
	.configure = flow_hw_configure,
	.pattern_template_create = flow_hw_pattern_template_create,
	.pattern_template_destroy = flow_hw_pattern_template_destroy,
	.actions_template_create = flow_hw_actions_template_create,
	.actions_template_destroy = flow_hw_actions_template_destroy,
	.template_table_create = flow_hw_table_create,
	.template_table_destroy = flow_hw_table_destroy,
	.async_flow_create = flow_hw_async_flow_create,
	.async_flow_destroy = flow_hw_async_flow_destroy,
	.pull = flow_hw_pull,
	.push = flow_hw_push,
	.async_action_create = flow_hw_action_handle_create,
	.async_action_destroy = flow_hw_action_handle_destroy,
	.async_action_update = flow_hw_action_handle_update,
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
};

#endif