/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include <rte_flow.h>

#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

/* The maximum number of actions supported in a flow. */
#define MLX5_HW_MAX_ACTS 16

/* Default push burst threshold. */
#define BURST_THR 32u

/* Default queue to flush the flows. */
#define MLX5_DEFAULT_FLUSH_QUEUE 0

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;

/* DR action flags per table type (root and non-root). */
static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
				[MLX5DR_TABLE_TYPE_MAX] = {
	{
		MLX5DR_ACTION_FLAG_ROOT_RX,
		MLX5DR_ACTION_FLAG_ROOT_TX,
		MLX5DR_ACTION_FLAG_ROOT_FDB,
	},
	{
		MLX5DR_ACTION_FLAG_HWS_RX,
		MLX5DR_ACTION_FLAG_HWS_TX,
		MLX5DR_ACTION_FLAG_HWS_FDB,
	},
};
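
/*
 * Editor's illustrative note (not part of the driver): the matrix is
 * indexed first by root/non-root and then by table type, so for a
 * non-root (group != 0) ingress table the lookup is:
 *
 *	uint32_t flag = mlx5_hw_act_flag[!!attr->group]
 *					[MLX5DR_TABLE_TYPE_NIC_RX];
 *	// flag == MLX5DR_ACTION_FLAG_HWS_RX
 */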

/**
 * Register destination table DR jump action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] dest_group
 *   The destination group ID.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Jump action on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_hw_jump_action *
flow_hw_jump_action_register(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     uint32_t dest_group,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_attr jattr = *attr;
	struct mlx5_flow_group *grp;
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &jattr,
	};
	struct mlx5_list_entry *ge;

	jattr.group = dest_group;
	ge = mlx5_hlist_register(priv->sh->flow_tbls, dest_group, &ctx);
	if (!ge)
		return NULL;
	grp = container_of(ge, struct mlx5_flow_group, entry);
	return &grp->jump;
}

/**
 * Release jump action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] jump
 *   Pointer to the jump action.
 */
static void
flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_group *grp;

	grp = container_of(jump, struct mlx5_flow_group, jump);
	mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
}
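
/*
 * Editor's sketch of the register/release pairing (not part of the
 * driver): each flow_hw_jump_action_register() call takes a reference on
 * the destination group through the flow_tbls hash list, so every user
 * must release it exactly once when the flow or table is torn down:
 *
 *	struct mlx5_hw_jump_action *jump =
 *		flow_hw_jump_action_register(dev, &attr, dest_group, &err);
 *	if (jump) {
 *		// ... use jump->hws_action or jump->root_action ...
 *		flow_hw_jump_release(dev, jump);
 *	}
 */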

/**
 * Register queue/RSS action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] hws_flags
 *   DR action flags.
 * @param[in] action
 *   rte flow action.
 *
 * @return
 *   Queue/RSS action (TIR) on success, NULL otherwise and rte_errno is set.
 */
static inline struct mlx5_hrxq *
flow_hw_tir_action_register(struct rte_eth_dev *dev,
			    uint32_t hws_flags,
			    const struct rte_flow_action *action)
{
	struct mlx5_flow_rss_desc rss_desc = {
		.hws_flags = hws_flags,
	};
	struct mlx5_hrxq *hrxq;

	if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		const struct rte_flow_action_queue *queue = action->conf;

		rss_desc.const_q = &queue->index;
		rss_desc.queue_num = 1;
	} else {
		const struct rte_flow_action_rss *rss = action->conf;

		rss_desc.queue_num = rss->queue_num;
		rss_desc.const_q = rss->queue;
		memcpy(rss_desc.key,
		       !rss->key ? rss_hash_default_key : rss->key,
		       MLX5_RSS_HASH_KEY_LEN);
		rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
		rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
		flow_dv_hashfields_set(0, &rss_desc, &rss_desc.hash_fields);
		flow_dv_action_rss_l34_hash_adjust(rss->types,
						   &rss_desc.hash_fields);
		if (rss->level > 1) {
			rss_desc.hash_fields |= IBV_RX_HASH_INNER;
			rss_desc.tunnel = 1;
		}
	}
	hrxq = mlx5_hrxq_get(dev, &rss_desc);
	return hrxq;
}
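
/*
 * Editor's sketch (not part of the driver): the action conf selects the
 * RSS descriptor layout above. A single-queue action would be registered
 * as, e.g.:
 *
 *	struct rte_flow_action_queue q = { .index = 3 };
 *	struct rte_flow_action qa = {
 *		.type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q,
 *	};
 *	struct mlx5_hrxq *hrxq =
 *		flow_hw_tir_action_register(dev, hws_flags, &qa);
 *
 * The returned hrxq is reference counted by mlx5_hrxq_get() and must be
 * put back with mlx5_hrxq_obj_release() when the flow is destroyed.
 */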

/**
 * Destroy DR actions created by an action template.
 *
 * For DR actions created during the table creation's action translation,
 * the DR actions need to be destroyed together with the table.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 */
static void
__flow_hw_action_template_destroy(struct rte_eth_dev *dev,
				  struct mlx5_hw_actions *acts)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (acts->jump) {
		struct mlx5_flow_group *grp;

		grp = container_of
			(acts->jump, struct mlx5_flow_group, jump);
		mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
		acts->jump = NULL;
	}
}

/**
 * Allocate the dynamic action construct data.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 *
 * @return
 *   Action construct data on success, NULL otherwise and rte_errno is set.
 */
static __rte_always_inline struct mlx5_action_construct_data *
__flow_hw_act_data_alloc(struct mlx5_priv *priv,
			 enum rte_flow_action_type type,
			 uint16_t action_src,
			 uint16_t action_dst)
{
	struct mlx5_action_construct_data *act_data;
	uint32_t idx = 0;

	act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
	if (!act_data)
		return NULL;
	act_data->idx = idx;
	act_data->type = type;
	act_data->action_src = action_src;
	act_data->action_dst = action_dst;
	return act_data;
}

/**
 * Append a dynamic action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
__flow_hw_act_data_general_append(struct mlx5_priv *priv,
				  struct mlx5_hw_actions *acts,
				  enum rte_flow_action_type type,
				  uint16_t action_src,
				  uint16_t action_dst)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}

/**
 * Translate rte_flow actions to DR actions.
 *
 * As the action template has already indicated the actions, translate
 * the rte_flow actions to DR actions if possible, so that cycles are
 * saved in the flow creation stage from organizing the actions.
 * Actions with incomplete information are appended to a list and
 * resolved per flow at creation time.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] table_attr
 *   Pointer to the table attributes.
 * @param[in/out] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] at
 *   Action template to be translated.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_actions_translate(struct rte_eth_dev *dev,
			  const struct rte_flow_template_table_attr *table_attr,
			  struct mlx5_hw_actions *acts,
			  struct rte_flow_actions_template *at,
			  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_attr *attr = &table_attr->flow_attr;
	struct rte_flow_action *actions = at->actions;
	struct rte_flow_action *action_start = actions;
	struct rte_flow_action *masks = at->masks;
	bool actions_end = false;
	uint32_t type, i;
	int err;

	if (attr->transfer)
		type = MLX5DR_TABLE_TYPE_FDB;
	else if (attr->egress)
		type = MLX5DR_TABLE_TYPE_NIC_TX;
	else
		type = MLX5DR_TABLE_TYPE_NIC_RX;
	for (i = 0; !actions_end; actions++, masks++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			acts->rule_acts[i++].action =
				priv->hw_drop[!!attr->group][type];
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			if (masks->conf) {
				uint32_t jump_group =
					((const struct rte_flow_action_jump *)
					 actions->conf)->group;
				acts->jump = flow_hw_jump_action_register
						(dev, attr, jump_group, error);
				if (!acts->jump)
					goto err;
				acts->rule_acts[i].action = (!!attr->group) ?
						acts->jump->hws_action :
						acts->jump->root_action;
			} else if (__flow_hw_act_data_general_append
					(priv, acts, actions->type,
					 actions - action_start, i)) {
				goto err;
			}
			i++;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			if (masks->conf) {
				acts->tir = flow_hw_tir_action_register
					(dev,
					 mlx5_hw_act_flag[!!attr->group][type],
					 actions);
				if (!acts->tir)
					goto err;
				acts->rule_acts[i].action =
					acts->tir->action;
			} else if (__flow_hw_act_data_general_append
					(priv, acts, actions->type,
					 actions - action_start, i)) {
				goto err;
			}
			i++;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			if (masks->conf) {
				acts->tir = flow_hw_tir_action_register
					(dev,
					 mlx5_hw_act_flag[!!attr->group][type],
					 actions);
				if (!acts->tir)
					goto err;
				acts->rule_acts[i].action =
					acts->tir->action;
			} else if (__flow_hw_act_data_general_append
					(priv, acts, actions->type,
					 actions - action_start, i)) {
				goto err;
			}
			i++;
			break;
		case RTE_FLOW_ACTION_TYPE_END:
			actions_end = true;
			break;
		default:
			break;
		}
	}
	acts->acts_num = i;
	return 0;
err:
	err = rte_errno;
	__flow_hw_action_template_destroy(dev, acts);
	return rte_flow_error_set(error, err,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to create rte table");
}

/**
 * Construct the flow action array.
 *
 * For an action template that contains dynamic actions, these actions
 * need to be updated according to the rte_flow actions during flow
 * creation.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] job
 *   Pointer to the job descriptor.
 * @param[in] hw_acts
 *   Pointer to the translated actions from the template.
 * @param[in] actions
 *   Array of rte_flow actions to be checked.
 * @param[in/out] rule_acts
 *   Array of DR rule actions to be used during flow creation.
 * @param[out] acts_num
 *   Pointer to the real number of actions the flow has.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
flow_hw_actions_construct(struct rte_eth_dev *dev,
			  struct mlx5_hw_q_job *job,
			  struct mlx5_hw_actions *hw_acts,
			  const struct rte_flow_action actions[],
			  struct mlx5dr_rule_action *rule_acts,
			  uint32_t *acts_num)
{
	struct rte_flow_template_table *table = job->flow->table;
	struct mlx5_action_construct_data *act_data;
	const struct rte_flow_action *action;
	struct rte_flow_attr attr = {
		.ingress = 1,
	};
	uint32_t ft_flag;

	memcpy(rule_acts, hw_acts->rule_acts,
	       sizeof(*rule_acts) * hw_acts->acts_num);
	*acts_num = hw_acts->acts_num;
	if (LIST_EMPTY(&hw_acts->act_list))
		return 0;
	attr.group = table->grp->group_id;
	ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
	if (table->type == MLX5DR_TABLE_TYPE_FDB) {
		attr.transfer = 1;
		attr.ingress = 1;
	} else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
		attr.egress = 1;
		attr.ingress = 0;
	}
	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
		uint32_t jump_group;
		struct mlx5_hw_jump_action *jump;
		struct mlx5_hrxq *hrxq;

		action = &actions[act_data->action_src];
		MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||
			    (int)action->type == act_data->type);
		switch (action->type) {
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			jump_group = ((const struct rte_flow_action_jump *)
				      action->conf)->group;
			jump = flow_hw_jump_action_register
				(dev, &attr, jump_group, NULL);
			if (!jump)
				return -1;
			rule_acts[act_data->action_dst].action =
				(!!attr.group) ? jump->hws_action :
						 jump->root_action;
			job->flow->jump = jump;
			job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			hrxq = flow_hw_tir_action_register(dev,
					ft_flag,
					action);
			if (!hrxq)
				return -1;
			rule_acts[act_data->action_dst].action = hrxq->action;
			job->flow->hrxq = hrxq;
			job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
			break;
		default:
			break;
		}
	}
	return 0;
}
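
/*
 * Editor's sketch (not driver code): at flow creation the application
 * supplies concrete values for the unmasked template slots, and the loop
 * above patches only those rule_acts entries, e.g. for a jump whose
 * group was left unmasked in the template:
 *
 *	struct rte_flow_action_jump jv = { .group = 5 };	// per flow
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jv },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * All masked actions were already copied from hw_acts->rule_acts by the
 * memcpy() at the top of the function.
 */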

/**
 * Enqueue HW steering flow creation.
 *
 * The flow will be applied to the HW only if the postpone bit is not set or
 * the extra push function is called.
 * The flow creation status should be checked from the dequeue result.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to create the flow.
 * @param[in] attr
 *   Pointer to the flow operation attributes.
 * @param[in] table
 *   Pointer to the template table the flow is created from.
 * @param[in] items
 *   Items with flow spec value.
 * @param[in] pattern_template_index
 *   Index of the item pattern template the flow follows, from the table.
 * @param[in] actions
 *   Actions with flow spec value.
 * @param[in] action_template_index
 *   Index of the action template the flow follows, from the table.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Flow pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow *
flow_hw_async_flow_create(struct rte_eth_dev *dev,
			  uint32_t queue,
			  const struct rte_flow_op_attr *attr,
			  struct rte_flow_template_table *table,
			  const struct rte_flow_item items[],
			  uint8_t pattern_template_index,
			  const struct rte_flow_action actions[],
			  uint8_t action_template_index,
			  void *user_data,
			  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_rule_attr rule_attr = {
		.queue_id = queue,
		.user_data = user_data,
		.burst = attr->postpone,
	};
	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
	struct mlx5_hw_actions *hw_acts;
	struct rte_flow_hw *flow;
	struct mlx5_hw_q_job *job;
	uint32_t acts_num, flow_idx;
	int ret;

	if (unlikely(!priv->hw_q[queue].job_idx)) {
		rte_errno = ENOMEM;
		goto error;
	}
	flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
	if (!flow)
		goto error;
	/*
	 * Set the table here in order to know the destination table
	 * when freeing the flow afterwards.
	 */
	flow->table = table;
	flow->idx = flow_idx;
	job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
	/*
	 * Set the job type here in order to know if the flow memory
	 * should be freed or not when getting the result from the dequeue.
	 */
	job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
	job->flow = flow;
	job->user_data = user_data;
	rule_attr.user_data = job;
	hw_acts = &table->ats[action_template_index].acts;
	/* Construct the flow action array based on the input actions. */
	flow_hw_actions_construct(dev, job, hw_acts, actions,
				  rule_acts, &acts_num);
	ret = mlx5dr_rule_create(table->matcher,
				 pattern_template_index, items,
				 rule_acts, acts_num,
				 &rule_attr, &flow->rule);
	if (likely(!ret))
		return (struct rte_flow *)flow;
	/* Flow creation failed, return the descriptor and flow memory. */
	mlx5_ipool_free(table->flow, flow_idx);
	priv->hw_q[queue].job_idx++;
error:
	rte_flow_error_set(error, rte_errno,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "failed to create rte flow");
	return NULL;
}

/**
 * Enqueue HW steering flow destruction.
 *
 * The flow will be applied to the HW only if the postpone bit is not set or
 * the extra push function is called.
 * The flow destruction status should be checked from the dequeue result.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to destroy the flow.
 * @param[in] attr
 *   Pointer to the flow operation attributes.
 * @param[in] flow
 *   Pointer to the flow to be destroyed.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
			   uint32_t queue,
			   const struct rte_flow_op_attr *attr,
			   struct rte_flow *flow,
			   void *user_data,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_rule_attr rule_attr = {
		.queue_id = queue,
		.user_data = user_data,
		.burst = attr->postpone,
	};
	struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
	struct mlx5_hw_q_job *job;
	int ret;

	if (unlikely(!priv->hw_q[queue].job_idx)) {
		rte_errno = ENOMEM;
		goto error;
	}
	job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
	job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
	job->user_data = user_data;
	job->flow = fh;
	rule_attr.user_data = job;
	ret = mlx5dr_rule_destroy(&fh->rule, &rule_attr);
	if (likely(!ret))
		return 0;
	priv->hw_q[queue].job_idx++;
error:
	return rte_flow_error_set(error, rte_errno,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to destroy rte flow");
}

/**
 * Pull the enqueued flows.
 *
 * For flows enqueued from creation/destruction, the status should be
 * checked from the dequeue result.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to pull the result from.
 * @param[in/out] res
 *   Array to save the results.
 * @param[in] n_res
 *   Number of result entries available in the res array.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Result number on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_pull(struct rte_eth_dev *dev,
	     uint32_t queue,
	     struct rte_flow_op_result res[],
	     uint16_t n_res,
	     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hw_q_job *job;
	int ret, i;

	ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
	if (ret < 0)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "failed to query flow queue");
	for (i = 0; i < ret; i++) {
		job = (struct mlx5_hw_q_job *)res[i].user_data;
		/* Restore user data. */
		res[i].user_data = job->user_data;
		if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
			if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
				flow_hw_jump_release(dev, job->flow->jump);
			else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
				mlx5_hrxq_obj_release(dev, job->flow->hrxq);
			mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
		}
		priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
	}
	return ret;
}

/**
 * Push the enqueued flows to HW.
 *
 * Force apply all the enqueued flows to the HW.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to push the flows.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_hw_push(struct rte_eth_dev *dev,
	     uint32_t queue,
	     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
				       MLX5DR_SEND_QUEUE_ACTION_DRAIN);
	if (ret) {
		rte_flow_error_set(error, rte_errno,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "failed to push flows");
		return ret;
	}
	return 0;
}

/**
 * Drain the enqueued flows' completions.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to pull the flow completions from.
 * @param[in] pending_rules
 *   The pending flow number.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
__flow_hw_pull_comp(struct rte_eth_dev *dev,
		    uint32_t queue,
		    uint32_t pending_rules,
		    struct rte_flow_error *error)
{
	struct rte_flow_op_result comp[BURST_THR];
	int ret, i, empty_loop = 0;

	flow_hw_push(dev, queue, error);
	while (pending_rules) {
		ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
		if (ret < 0)
			return -1;
		if (!ret) {
			rte_delay_us_sleep(20000);
			if (++empty_loop > 5) {
				DRV_LOG(WARNING, "No available dequeue, quit.");
				break;
			}
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (comp[i].status == RTE_FLOW_OP_ERROR)
				DRV_LOG(WARNING, "Flow flush got an error CQE.");
		}
		if ((uint32_t)ret > pending_rules) {
			DRV_LOG(WARNING, "Flow flush got extra CQEs.");
			return rte_flow_error_set(error, ERANGE,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"flow flush got extra CQEs");
		}
		pending_rules -= ret;
		empty_loop = 0;
	}
	return 0;
}

/**
 * Flush created flows.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
int
flow_hw_q_flow_flush(struct rte_eth_dev *dev,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hw_q *hw_q;
	struct rte_flow_template_table *tbl;
	struct rte_flow_hw *flow;
	struct rte_flow_op_attr attr = {
		.postpone = 0,
	};
	uint32_t pending_rules = 0;
	uint32_t queue;
	uint32_t fidx;

	/*
	 * Ensure all the enqueued flow creation/destruction jobs are pushed
	 * and dequeued, in case the user forgot to dequeue; otherwise the
	 * enqueued created flows would leak. Forgotten dequeues would also
	 * make the flow flush receive more CQEs than expected and turn
	 * pending_rules negative.
	 */
	for (queue = 0; queue < priv->nb_queue; queue++) {
		hw_q = &priv->hw_q[queue];
		if (__flow_hw_pull_comp(dev, queue, hw_q->size - hw_q->job_idx,
					error))
			return -1;
	}
	/* Flush flows per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
	hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
	LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
		MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
			if (flow_hw_async_flow_destroy(dev,
						MLX5_DEFAULT_FLUSH_QUEUE,
						&attr,
						(struct rte_flow *)flow,
						NULL,
						error))
				return -1;
			pending_rules++;
			/* Drain completions once a full queue size is pending. */
			if (pending_rules >= hw_q->size) {
				if (__flow_hw_pull_comp(dev,
						MLX5_DEFAULT_FLUSH_QUEUE,
						pending_rules, error))
					return -1;
				pending_rules = 0;
			}
		}
	}
	/* Drain the left-over completions. */
	if (pending_rules &&
	    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, pending_rules,
				error))
		return -1;
	return 0;
}
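
/*
 * Editor's note (illustrative): the flush above batches destruction by
 * queue depth. The same drain idiom can be reused wherever a bounded
 * number of enqueued operations must be retired, e.g.:
 *
 *	pending++;
 *	if (pending >= hw_q->size) {
 *		if (__flow_hw_pull_comp(dev, queue, pending, error))
 *			return -1;
 *		pending = 0;
 *	}
 */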

/**
 * Create flow table.
 *
 * The input item and action templates will be bound to the table.
 * Flow memory will also be allocated. The matcher will be created based
 * on the item templates. Actions will be translated to the dedicated
 * DR actions if possible.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the table attributes.
 * @param[in] item_templates
 *   Item template array to be bound to the table.
 * @param[in] nb_item_templates
 *   Number of item templates.
 * @param[in] action_templates
 *   Action template array to be bound to the table.
 * @param[in] nb_action_templates
 *   Number of action templates.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Table on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_template_table *
flow_hw_table_create(struct rte_eth_dev *dev,
		     const struct rte_flow_template_table_attr *attr,
		     struct rte_flow_pattern_template *item_templates[],
		     uint8_t nb_item_templates,
		     struct rte_flow_actions_template *action_templates[],
		     uint8_t nb_action_templates,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_matcher_attr matcher_attr = {0};
	struct rte_flow_template_table *tbl = NULL;
	struct mlx5_flow_group *grp;
	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
	struct rte_flow_attr flow_attr = attr->flow_attr;
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &flow_attr,
	};
	struct mlx5_indexed_pool_config cfg = {
		.size = sizeof(struct rte_flow_hw),
		.trunk_size = 1 << 12,
		.per_core_cache = 1 << 13,
		.need_lock = 1,
		.release_mem_en = !!priv->sh->config.reclaim_mode,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hw_table_flow",
	};
	struct mlx5_list_entry *ge;
	uint32_t i, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
	uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
	int err;

	/* The HWS layer accepts only 1 item template with the root table. */
	if (!attr->flow_attr.group)
		max_tpl = 1;
	cfg.max_idx = nb_flows;
	/* For a table with a very limited number of flows, disable the cache. */
	if (nb_flows < cfg.trunk_size) {
		cfg.per_core_cache = 0;
		cfg.trunk_size = nb_flows;
	}
	/* Check if too many templates are required. */
	if (nb_item_templates > max_tpl ||
	    nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
		rte_errno = EINVAL;
		goto error;
	}
	/* Allocate the table memory. */
	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
	if (!tbl)
		goto error;
	/* Allocate the flow indexed pool. */
	tbl->flow = mlx5_ipool_create(&cfg);
	if (!tbl->flow)
		goto error;
	/* Register the flow group. */
	ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
	if (!ge)
		goto error;
	grp = container_of(ge, struct mlx5_flow_group, entry);
	tbl->grp = grp;
	/* Prepare matcher information. */
	matcher_attr.priority = attr->flow_attr.priority;
	matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
	/* Build the item templates. */
	for (i = 0; i < nb_item_templates; i++) {
		uint32_t ret;

		ret = __atomic_add_fetch(&item_templates[i]->refcnt, 1,
					 __ATOMIC_RELAXED);
		if (ret <= 1) {
			rte_errno = EINVAL;
			goto it_error;
		}
		mt[i] = item_templates[i]->mt;
		tbl->its[i] = item_templates[i];
	}
	tbl->matcher = mlx5dr_matcher_create
		(tbl->grp->tbl, mt, nb_item_templates, &matcher_attr);
	if (!tbl->matcher)
		goto it_error;
	tbl->nb_item_templates = nb_item_templates;
	/* Build the action templates. */
	for (i = 0; i < nb_action_templates; i++) {
		uint32_t ret;

		ret = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
					 __ATOMIC_RELAXED);
		if (ret <= 1) {
			rte_errno = EINVAL;
			goto at_error;
		}
		LIST_INIT(&tbl->ats[i].acts.act_list);
		err = flow_hw_actions_translate(dev, attr,
						&tbl->ats[i].acts,
						action_templates[i], error);
		if (err)
			goto at_error;
		tbl->ats[i].action_template = action_templates[i];
	}
	tbl->nb_action_templates = nb_action_templates;
	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
		    MLX5DR_TABLE_TYPE_NIC_RX);
	LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
	return tbl;
at_error:
	while (i--) {
		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
		__atomic_sub_fetch(&action_templates[i]->refcnt,
				   1, __ATOMIC_RELAXED);
	}
	i = nb_item_templates;
it_error:
	while (i--)
		__atomic_sub_fetch(&item_templates[i]->refcnt,
				   1, __ATOMIC_RELAXED);
	if (tbl->matcher)
		mlx5dr_matcher_destroy(tbl->matcher);
error:
	err = rte_errno;
	if (tbl) {
		if (tbl->grp)
			mlx5_hlist_unregister(priv->sh->groups,
					      &tbl->grp->entry);
		if (tbl->flow)
			mlx5_ipool_destroy(tbl->flow);
		mlx5_free(tbl);
	}
	rte_flow_error_set(error, err,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "failed to create rte table");
	return NULL;
}

/**
 * Destroy flow table.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] table
 *   Pointer to the table to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_table_destroy(struct rte_eth_dev *dev,
		      struct rte_flow_template_table *table,
		      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int i;

	if (table->refcnt) {
		DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "table is in use");
	}
	LIST_REMOVE(table, next);
	for (i = 0; i < table->nb_item_templates; i++)
		__atomic_sub_fetch(&table->its[i]->refcnt,
				   1, __ATOMIC_RELAXED);
	for (i = 0; i < table->nb_action_templates; i++) {
		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
		__atomic_sub_fetch(&table->ats[i].action_template->refcnt,
				   1, __ATOMIC_RELAXED);
	}
	mlx5dr_matcher_destroy(table->matcher);
	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
	mlx5_ipool_destroy(table->flow);
	mlx5_free(table);
	return 0;
}

/**
 * Create flow action template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the action template attributes.
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] masks
 *   List of actions that marks which of the action's members is constant.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Action template pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_actions_template *
flow_hw_actions_template_create(struct rte_eth_dev *dev,
			const struct rte_flow_actions_template_attr *attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int len, act_len, mask_len, i;
	struct rte_flow_actions_template *at;

	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
				NULL, 0, actions, error);
	if (act_len <= 0)
		return NULL;
	len = RTE_ALIGN(act_len, 16);
	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
				 NULL, 0, masks, error);
	if (mask_len <= 0)
		return NULL;
	len += RTE_ALIGN(mask_len, 16);
	at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at), 64, rte_socket_id());
	if (!at) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate action template");
		return NULL;
	}
	at->attr = *attr;
	at->actions = (struct rte_flow_action *)(at + 1);
	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions, len,
				actions, error);
	if (act_len <= 0)
		goto error;
	at->masks = (struct rte_flow_action *)
		    (((uint8_t *)at->actions) + act_len);
	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
				 len - act_len, masks, error);
	if (mask_len <= 0)
		goto error;
	/*
	 * The mlx5 PMD stores the indirect action index directly in the
	 * action conf pointer. The rte_flow_conv() function copies the
	 * content the conf pointer refers to, so the indirect action index
	 * must be restored from the original action conf here.
	 */
	for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
	     actions++, masks++, i++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			at->actions[i].conf = actions->conf;
			at->masks[i].conf = masks->conf;
		}
	}
	__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
	return at;
error:
	mlx5_free(at);
	return NULL;
}
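
/*
 * Editor's usage sketch (public API; aattr is assumed to hold template
 * attributes set up for the intended direction): masks use the same
 * layout as actions, and a non-NULL conf in masks marks the corresponding
 * member as constant for all flows of the template:
 *
 *	at = rte_flow_actions_template_create(port_id, &aattr,
 *					      actions, masks, &err);
 */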

/**
 * Destroy flow action template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the action template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
				 struct rte_flow_actions_template *template,
				 struct rte_flow_error *error)
{
	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
		DRV_LOG(WARNING, "Action template %p is still in use.",
			(void *)template);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "action template is in use");
	}
	LIST_REMOVE(template, next);
	mlx5_free(template);
	return 0;
}

/**
 * Create flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the item template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Item template pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_pattern_template *
flow_hw_pattern_template_create(struct rte_eth_dev *dev,
			     const struct rte_flow_pattern_template_attr *attr,
			     const struct rte_flow_item items[],
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_pattern_template *it;

	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
	if (!it) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate item template");
		return NULL;
	}
	it->attr = *attr;
	it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
	if (!it->mt) {
		mlx5_free(it);
		rte_flow_error_set(error, rte_errno,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create match template");
		return NULL;
	}
	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
	return it;
}
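
/*
 * Editor's usage sketch (public API; mask values are illustrative): the
 * pattern template fixes which fields will be matched, while per-flow
 * spec values are provided later at rte_flow_async_create() time:
 *
 *	struct rte_flow_pattern_template_attr pattr = {
 *		.relaxed_matching = 0,
 *	};
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 *	pt = rte_flow_pattern_template_create(port_id, &pattr, pattern, &err);
 */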

/**
 * Destroy flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the item template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
				 struct rte_flow_pattern_template *template,
				 struct rte_flow_error *error)
{
	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
		DRV_LOG(WARNING, "Item template %p is still in use.",
			(void *)template);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "item template is in use");
	}
	LIST_REMOVE(template, next);
	claim_zero(mlx5dr_match_template_destroy(template->mt));
	mlx5_free(template);
	return 0;
}

/**
 * Get information about HWS pre-configurable resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] port_info
 *   Pointer to port information.
 * @param[out] queue_info
 *   Pointer to queue information.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
		 struct rte_flow_port_info *port_info,
		 struct rte_flow_queue_info *queue_info,
		 struct rte_flow_error *error __rte_unused)
{
	/* Nothing to be updated currently. */
	memset(port_info, 0, sizeof(*port_info));
	/* Queue size is unlimited at the low level. */
	queue_info->max_size = UINT32_MAX;
	return 0;
}

/**
 * Create group callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] cb_ctx
 *   Pointer to the group creation context.
 *
 * @return
 *   Group entry on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_list_entry *
flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_table_attr dr_tbl_attr = {0};
	struct rte_flow_error *error = ctx->error;
	struct mlx5_flow_group *grp_data;
	struct mlx5dr_table *tbl = NULL;
	struct mlx5dr_action *jump;
	uint32_t idx = 0;

	grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
	if (!grp_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	dr_tbl_attr.level = attr->group;
	if (attr->transfer)
		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
	else if (attr->egress)
		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
	else
		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
	tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
	if (!tbl)
		goto error;
	grp_data->tbl = tbl;
	if (attr->group) {
		/* Jump action to be used by the non-root table. */
		jump = mlx5dr_action_create_dest_table
			(priv->dr_ctx, tbl,
			 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
		if (!jump)
			goto error;
		grp_data->jump.hws_action = jump;
		/* Jump action to be used by the root table. */
		jump = mlx5dr_action_create_dest_table
			(priv->dr_ctx, tbl,
			 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
					 [dr_tbl_attr.type]);
		if (!jump)
			goto error;
		grp_data->jump.root_action = jump;
	}
	grp_data->idx = idx;
	grp_data->group_id = attr->group;
	grp_data->type = dr_tbl_attr.type;
	return &grp_data->entry;
error:
	if (grp_data->jump.root_action)
		mlx5dr_action_destroy(grp_data->jump.root_action);
	if (grp_data->jump.hws_action)
		mlx5dr_action_destroy(grp_data->jump.hws_action);
	if (tbl)
		mlx5dr_table_destroy(tbl);
	if (idx)
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
	rte_flow_error_set(error, ENOMEM,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "cannot allocate flow dr table");
	return NULL;
}

/**
 * Remove group callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] entry
 *   Pointer to the entry to be removed.
 */
void
flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_group *grp_data =
		    container_of(entry, struct mlx5_flow_group, entry);

	MLX5_ASSERT(entry && sh);
	/* To use the wrapper glue functions instead. */
	if (grp_data->jump.hws_action)
		mlx5dr_action_destroy(grp_data->jump.hws_action);
	if (grp_data->jump.root_action)
		mlx5dr_action_destroy(grp_data->jump.root_action);
	mlx5dr_table_destroy(grp_data->tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
}

/**
 * Match group callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] entry
 *   Pointer to the group to be matched.
 * @param[in] cb_ctx
 *   Pointer to the group matching context.
 *
 * @return
 *   0 on match, non-zero on mismatch.
 */
int
flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
		     void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_group *grp_data =
		container_of(entry, struct mlx5_flow_group, entry);
	struct rte_flow_attr *attr =
			(struct rte_flow_attr *)ctx->data;

	return (grp_data->group_id != attr->group) ||
		((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
		attr->transfer) ||
		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
		attr->egress) ||
		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
		attr->ingress);
}

/**
 * Clone group entry callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] oentry
 *   Pointer to the source entry to be cloned.
 * @param[in] cb_ctx
 *   Pointer to the group creation context.
 *
 * @return
 *   Cloned group entry on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_list_entry *
flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
		     void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_group *grp_data;
	struct rte_flow_error *error = ctx->error;
	uint32_t idx = 0;

	grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
	if (!grp_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	memcpy(grp_data, oentry, sizeof(*grp_data));
	grp_data->idx = idx;
	return &grp_data->entry;
}

/**
 * Free the cloned group entry callback.
 *
 * @param[in] tool_ctx
 *   Pointer to the hash list related context.
 * @param[in] entry
 *   Pointer to the group entry to be freed.
 */
void
flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_group *grp_data =
		    container_of(entry, struct mlx5_flow_group, entry);

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
}

/**
 * Configure port HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] port_attr
 *   Port configuration attributes.
 * @param[in] nb_queue
 *   Number of queues.
 * @param[in] queue_attr
 *   Array that holds attributes for each flow queue.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_configure(struct rte_eth_dev *dev,
		  const struct rte_flow_port_attr *port_attr,
		  uint16_t nb_queue,
		  const struct rte_flow_queue_attr *queue_attr[],
		  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_context *dr_ctx = NULL;
	struct mlx5dr_context_attr dr_ctx_attr = {0};
	struct mlx5_hw_q *hw_q;
	struct mlx5_hw_q_job *job = NULL;
	uint32_t mem_size, i, j;
	struct mlx5_indexed_pool_config cfg = {
		.size = sizeof(struct mlx5_action_construct_data),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = !!priv->sh->config.reclaim_mode,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hw_action_construct_data",
	};

	if (!port_attr || !nb_queue || !queue_attr) {
		rte_errno = EINVAL;
		goto err;
	}
	/* In case of reconfiguration, release the existing context first. */
	if (priv->dr_ctx) {
		for (i = 0; i < priv->nb_queue; i++) {
			hw_q = &priv->hw_q[i];
			/* Make sure all queues are empty. */
			if (hw_q->size != hw_q->job_idx) {
				rte_errno = EBUSY;
				goto err;
			}
		}
		flow_hw_resource_release(dev);
	}
	priv->acts_ipool = mlx5_ipool_create(&cfg);
	if (!priv->acts_ipool)
		goto err;
	/* Allocate the queue job descriptor LIFO. */
	mem_size = sizeof(priv->hw_q[0]) * nb_queue;
	for (i = 0; i < nb_queue; i++) {
		/*
		 * Check if the queue sizes are all the same, as required
		 * by the HWS layer.
		 */
		if (queue_attr[i]->size != queue_attr[0]->size) {
			rte_errno = EINVAL;
			goto err;
		}
		mem_size += (sizeof(struct mlx5_hw_q_job *) +
			     sizeof(struct mlx5_hw_q_job)) *
			     queue_attr[0]->size;
	}
	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
				 64, SOCKET_ID_ANY);
	if (!priv->hw_q) {
		rte_errno = ENOMEM;
		goto err;
	}
	for (i = 0; i < nb_queue; i++) {
		priv->hw_q[i].job_idx = queue_attr[i]->size;
		priv->hw_q[i].size = queue_attr[i]->size;
		if (i == 0)
			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
					    &priv->hw_q[nb_queue];
		else
			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
					    &job[queue_attr[i - 1]->size];
		job = (struct mlx5_hw_q_job *)
		      &priv->hw_q[i].job[queue_attr[i]->size];
		for (j = 0; j < queue_attr[i]->size; j++)
			priv->hw_q[i].job[j] = &job[j];
	}
	dr_ctx_attr.pd = priv->sh->cdev->pd;
	dr_ctx_attr.queues = nb_queue;
	/* Queue sizes should all be the same. Take the first one. */
	dr_ctx_attr.queue_size = queue_attr[0]->size;
	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
	/* rte_errno has been updated by the HWS layer. */
	if (!dr_ctx)
		goto err;
	priv->dr_ctx = dr_ctx;
	priv->nb_queue = nb_queue;
	/* Add global actions. */
	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
			priv->hw_drop[i][j] = mlx5dr_action_create_dest_drop
				(priv->dr_ctx, mlx5_hw_act_flag[i][j]);
			if (!priv->hw_drop[i][j])
				goto err;
		}
	}
	return 0;
err:
	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
			if (!priv->hw_drop[i][j])
				continue;
			mlx5dr_action_destroy(priv->hw_drop[i][j]);
		}
	}
	if (dr_ctx)
		claim_zero(mlx5dr_context_close(dr_ctx));
	mlx5_free(priv->hw_q);
	priv->hw_q = NULL;
	if (priv->acts_ipool) {
		mlx5_ipool_destroy(priv->acts_ipool);
		priv->acts_ipool = NULL;
	}
	return rte_flow_error_set(error, rte_errno,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to configure port");
}

/**
 * Release HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
void
flow_hw_resource_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_template_table *tbl;
	struct rte_flow_pattern_template *it;
	struct rte_flow_actions_template *at;
	uint32_t i, j;

	if (!priv->dr_ctx)
		return;
	while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
		tbl = LIST_FIRST(&priv->flow_hw_tbl);
		flow_hw_table_destroy(dev, tbl, NULL);
	}
	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
		it = LIST_FIRST(&priv->flow_hw_itt);
		flow_hw_pattern_template_destroy(dev, it, NULL);
	}
	while (!LIST_EMPTY(&priv->flow_hw_at)) {
		at = LIST_FIRST(&priv->flow_hw_at);
		flow_hw_actions_template_destroy(dev, at, NULL);
	}
	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
			if (!priv->hw_drop[i][j])
				continue;
			mlx5dr_action_destroy(priv->hw_drop[i][j]);
		}
	}
	if (priv->acts_ipool) {
		mlx5_ipool_destroy(priv->acts_ipool);
		priv->acts_ipool = NULL;
	}
	mlx5_free(priv->hw_q);
	priv->hw_q = NULL;
	claim_zero(mlx5dr_context_close(priv->dr_ctx));
	priv->dr_ctx = NULL;
	priv->nb_queue = 0;
}

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
	.info_get = flow_hw_info_get,
	.configure = flow_hw_configure,
	.pattern_template_create = flow_hw_pattern_template_create,
	.pattern_template_destroy = flow_hw_pattern_template_destroy,
	.actions_template_create = flow_hw_actions_template_create,
	.actions_template_destroy = flow_hw_actions_template_destroy,
	.template_table_create = flow_hw_table_create,
	.template_table_destroy = flow_hw_table_destroy,
	.async_flow_create = flow_hw_async_flow_create,
	.async_flow_destroy = flow_hw_async_flow_destroy,
	.pull = flow_hw_pull,
	.push = flow_hw_push,
};

#endif