X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=15a4a8cd984e513d346d37749ae349d0ac6cc813;hb=9267617bb0a6918ee049b1a16062bf0185e2e843;hp=29011b12a84b83d16e7e5990e9432330139ae049;hpb=6dc0cbc6c640ce042cb7d1241d9a4202e49307c8;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 29011b12a8..15a4a8cd98 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -76,6 +76,7 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = { [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, + [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops, #endif [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops @@ -174,6 +175,9 @@ mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask) enum rte_flow_item_type type; switch (proto_mask & proto_spec) { + case 0: + type = RTE_FLOW_ITEM_TYPE_VOID; + break; case RTE_VXLAN_GPE_TYPE_IPV4: type = RTE_FLOW_ITEM_TYPE_IPV4; break; @@ -195,13 +199,16 @@ mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask) enum rte_flow_item_type type; switch (proto_mask & proto_spec) { + case 0: + type = RTE_FLOW_ITEM_TYPE_VOID; + break; case IPPROTO_UDP: type = RTE_FLOW_ITEM_TYPE_UDP; break; case IPPROTO_TCP: type = RTE_FLOW_ITEM_TYPE_TCP; break; - case IPPROTO_IP: + case IPPROTO_IPIP: type = RTE_FLOW_ITEM_TYPE_IPV4; break; case IPPROTO_IPV6: @@ -220,6 +227,9 @@ mlx5_ethertype_to_item_type(rte_be16_t type_spec, enum rte_flow_item_type type; switch (rte_be_to_cpu_16(type_spec & type_mask)) { + case 0: + type = RTE_FLOW_ITEM_TYPE_VOID; + break; case RTE_ETHER_TYPE_TEB: type = is_tunnel ? 
RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END; @@ -804,6 +814,102 @@ static int mlx5_flow_flex_item_release(struct rte_eth_dev *dev, const struct rte_flow_item_flex_handle *handle, struct rte_flow_error *error); +static int +mlx5_flow_info_get(struct rte_eth_dev *dev, + struct rte_flow_port_info *port_info, + struct rte_flow_queue_info *queue_info, + struct rte_flow_error *error); +static int +mlx5_flow_port_configure(struct rte_eth_dev *dev, + const struct rte_flow_port_attr *port_attr, + uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *err); + +static struct rte_flow_pattern_template * +mlx5_flow_pattern_template_create(struct rte_eth_dev *dev, + const struct rte_flow_pattern_template_attr *attr, + const struct rte_flow_item items[], + struct rte_flow_error *error); + +static int +mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *template, + struct rte_flow_error *error); +static struct rte_flow_actions_template * +mlx5_flow_actions_template_create(struct rte_eth_dev *dev, + const struct rte_flow_actions_template_attr *attr, + const struct rte_flow_action actions[], + const struct rte_flow_action masks[], + struct rte_flow_error *error); +static int +mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_actions_template *template, + struct rte_flow_error *error); + +static struct rte_flow_template_table * +mlx5_flow_table_create(struct rte_eth_dev *dev, + const struct rte_flow_template_table_attr *attr, + struct rte_flow_pattern_template *item_templates[], + uint8_t nb_item_templates, + struct rte_flow_actions_template *action_templates[], + uint8_t nb_action_templates, + struct rte_flow_error *error); +static int +mlx5_flow_table_destroy(struct rte_eth_dev *dev, + struct rte_flow_template_table *table, + struct rte_flow_error *error); +static struct rte_flow * +mlx5_flow_async_flow_create(struct rte_eth_dev *dev, + uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_template_table *table, + const struct rte_flow_item items[], + uint8_t pattern_template_index, + const struct rte_flow_action actions[], + uint8_t action_template_index, + void *user_data, + struct rte_flow_error *error); +static int +mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev, + uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow *flow, + void *user_data, + struct rte_flow_error *error); +static int +mlx5_flow_pull(struct rte_eth_dev *dev, + uint32_t queue, + struct rte_flow_op_result res[], + uint16_t n_res, + struct rte_flow_error *error); +static int +mlx5_flow_push(struct rte_eth_dev *dev, + uint32_t queue, + struct rte_flow_error *error); + +static struct rte_flow_action_handle * +mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, + const struct rte_flow_op_attr *attr, + const struct rte_flow_indir_action_conf *conf, + const struct rte_flow_action *action, + void *user_data, + struct rte_flow_error *error); + +static int +mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_action_handle *handle, + const void *update, + void *user_data, + struct rte_flow_error *error); + +static int +mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_action_handle *handle, + void *user_data, + struct rte_flow_error *error); static const struct rte_flow_ops mlx5_flow_ops = { 
.validate = mlx5_flow_validate, @@ -825,6 +931,21 @@ static const struct rte_flow_ops mlx5_flow_ops = { .get_restore_info = mlx5_flow_tunnel_get_restore_info, .flex_item_create = mlx5_flow_flex_item_create, .flex_item_release = mlx5_flow_flex_item_release, + .info_get = mlx5_flow_info_get, + .configure = mlx5_flow_port_configure, + .pattern_template_create = mlx5_flow_pattern_template_create, + .pattern_template_destroy = mlx5_flow_pattern_template_destroy, + .actions_template_create = mlx5_flow_actions_template_create, + .actions_template_destroy = mlx5_flow_actions_template_destroy, + .template_table_create = mlx5_flow_table_create, + .template_table_destroy = mlx5_flow_table_destroy, + .async_create = mlx5_flow_async_flow_create, + .async_destroy = mlx5_flow_async_flow_destroy, + .pull = mlx5_flow_pull, + .push = mlx5_flow_push, + .async_action_handle_create = mlx5_flow_async_action_handle_create, + .async_action_handle_update = mlx5_flow_async_action_handle_update, + .async_action_handle_destroy = mlx5_flow_async_action_handle_destroy, }; /* Tunnel information. */ @@ -901,7 +1022,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; enum modify_reg start_reg; bool skip_mtr_reg = false; @@ -1631,6 +1752,12 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't have 2 fate actions in" " same flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "queue action not supported for egress."); + if (mlx5_is_external_rxq(dev, queue->index)) + return 0; if (!priv->rxqs_n) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, @@ -1645,11 +1772,6 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &queue->index, "queue is not configured"); - if (attr->egress) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, - "queue action not supported for " - "egress"); return 0; } @@ -1664,7 +1786,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, * Size of the @p queues array. * @param[out] error * On error, filled with a textual error description. - * @param[out] queue + * @param[out] queue_idx * On error, filled with an offending queue index in @p queues array. 
 *
 * @return
@@ -1676,26 +1798,36 @@ mlx5_validate_rss_queues(struct rte_eth_dev *dev,
 			 const char **error, uint32_t *queue_idx)
 {
 	const struct mlx5_priv *priv = dev->data->dev_private;
-	enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
+	bool is_hairpin = false;
+	bool is_ext_rss = false;
 	uint32_t i;
 
 	for (i = 0; i != queues_n; ++i) {
-		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev,
-								   queues[i]);
+		struct mlx5_rxq_ctrl *rxq_ctrl;
+
+		if (mlx5_is_external_rxq(dev, queues[i])) {
+			is_ext_rss = true;
+			continue;
+		}
+		if (is_ext_rss) {
+			*error = "Combining external and regular RSS queues is not supported";
+			*queue_idx = i;
+			return -ENOTSUP;
+		}
 		if (queues[i] >= priv->rxqs_n) {
 			*error = "queue index out of range";
 			*queue_idx = i;
 			return -EINVAL;
 		}
+		rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
 		if (rxq_ctrl == NULL) {
 			*error = "queue is not configured";
 			*queue_idx = i;
 			return -EINVAL;
 		}
-		if (i == 0)
-			rxq_type = rxq_ctrl->type;
-		if (rxq_type != rxq_ctrl->type) {
+		if (i == 0 && rxq_ctrl->is_hairpin)
+			is_hairpin = true;
+		if (is_hairpin != rxq_ctrl->is_hairpin) {
			*error = "combining hairpin and regular RSS queues is not supported";
 			*queue_idx = i;
 			return -ENOTSUP;
@@ -1759,7 +1891,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->key_len,
 					  "RSS hash key too large");
-	if (rss->queue_num > priv->config.ind_table_max_size)
+	if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->queue_num,
@@ -1782,7 +1914,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L4 partial RSS requested but L4 RSS"
 					  " type not specified");
-	if (!priv->rxqs_n)
+	if (!priv->rxqs_n && priv->ext_rxqs == NULL)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  NULL, "No Rx queues configured");
@@ -1994,7 +2126,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
 					  "egress is not supported");
-	if (attributes->transfer && !priv->config.dv_esw_en)
+	if (attributes->transfer && !priv->sh->config.dv_esw_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
 					  NULL, "transfer is not supported");
@@ -2711,7 +2843,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 		uint8_t vni[4];
 	} id = { .vlan_id = 0, };
 
-	if (!priv->config.l3_vxlan_en)
+	if (!priv->sh->config.l3_vxlan_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 VXLAN is not enabled by device"
@@ -2814,6 +2946,107 @@ mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
 	return ret;
 }
 
+/**
+ * Validate GRE optional item.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit flags to mark detected items.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] gre_item
+ *   Pointer to the GRE item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */ +int +mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + uint64_t item_flags, + const struct rte_flow_attr *attr, + const struct rte_flow_item *gre_item, + struct rte_flow_error *error) +{ + const struct rte_flow_item_gre *gre_spec = gre_item->spec; + const struct rte_flow_item_gre *gre_mask = gre_item->mask; + const struct rte_flow_item_gre_opt *spec = item->spec; + const struct rte_flow_item_gre_opt *mask = item->mask; + struct mlx5_priv *priv = dev->data->dev_private; + int ret = 0; + struct rte_flow_item_gre_opt nic_mask = { + .checksum_rsvd = { + .checksum = RTE_BE16(UINT16_MAX), + .reserved1 = 0x0, + }, + .key = { + .key = RTE_BE32(UINT32_MAX), + }, + .sequence = { + .sequence = RTE_BE32(UINT32_MAX), + }, + }; + + if (!(item_flags & MLX5_FLOW_LAYER_GRE)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "No preceding GRE header"); + if (item_flags & MLX5_FLOW_LAYER_INNER) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "GRE option following a wrong item"); + if (!spec || !mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "At least one field gre_option(checksum/key/sequence) must be specified"); + if (!gre_mask) + gre_mask = &rte_flow_item_gre_mask; + if (mask->checksum_rsvd.checksum) + if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x8000)) && + !(gre_spec->c_rsvd0_ver & RTE_BE16(0x8000))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Checksum bit must be on"); + if (mask->key.key) + if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && + !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Key bit must be on"); + if (mask->sequence.sequence) + if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x1000)) && + !(gre_spec->c_rsvd0_ver & RTE_BE16(0x1000))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Sequence bit must be on"); + if (mask->checksum_rsvd.checksum || mask->sequence.sequence) { + if (priv->sh->steering_format_version == + MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 || + ((attr->group || attr->transfer) && + !priv->sh->misc5_cap) || + (!(priv->sh->tunnel_header_0_1 && + priv->sh->tunnel_header_2_3) && + !attr->group && !attr->transfer)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Checksum/Sequence not supported"); + } + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_gre_opt), + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); + return ret; +} + /** * Validate GRE item. * @@ -3138,7 +3371,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, struct mlx5_priv *priv = dev->data->dev_private; int ret; - if (!priv->config.mpls_en) + if (!priv->sh->dev_cap.mpls_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "MPLS not supported or" @@ -3428,12 +3661,18 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) if (type != MLX5_FLOW_TYPE_MAX) return type; + /* + * Currently when dv_flow_en == 2, only HW steering engine is + * supported. New engines can also be chosen here if ready. 
+ */ + if (priv->sh->config.dv_flow_en == 2) + return MLX5_FLOW_TYPE_HW; /* If no OS specific type - continue with DV/VERBS selection */ - if (attr->transfer && priv->config.dv_esw_en) + if (attr->transfer && priv->sh->config.dv_esw_en) type = MLX5_FLOW_TYPE_DV; if (!attr->transfer) - type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : - MLX5_FLOW_TYPE_VERBS; + type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : + MLX5_FLOW_TYPE_VERBS; return type; } @@ -4105,7 +4344,7 @@ static bool flow_check_modify_action_type(struct rte_eth_dev *dev, return true; case RTE_FLOW_ACTION_TYPE_FLAG: case RTE_FLOW_ACTION_TYPE_MARK: - if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) + if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) return true; else return false; @@ -4544,8 +4783,8 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, uint32_t mark_id; /* Check whether extensive metadata feature is engaged. */ - if (!priv->config.dv_flow_en || - priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + if (!priv->sh->config.dv_flow_en || + priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || !mlx5_flow_ext_mreg_supported(dev) || !priv->sh->dv_regc0_mask) return 0; @@ -4604,7 +4843,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; struct mlx5_flow_mreg_copy_resource *mcp_res; const struct rte_flow_action_mark *mark; @@ -5021,6 +5260,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, uint32_t tag_id = 0; struct rte_flow_item *vlan_item_dst = NULL; const struct rte_flow_item *vlan_item_src = NULL; + const struct rte_flow_item *orig_items = items; struct rte_flow_action *hw_mtr_action; struct rte_flow_action *action_pre_head = NULL; int32_t flow_src_port = priv->representor_id; @@ -5145,7 +5385,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, if (!fm->def_policy) { sub_policy = get_meter_sub_policy(dev, flow, wks, - attr, items, error); + attr, orig_items, + error); if (!sub_policy) return -rte_errno; } else { @@ -5740,7 +5981,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; const struct rte_flow_action *qrss = NULL; struct rte_flow_action *ext_actions = NULL; struct mlx5_flow *dev_flow = NULL; @@ -5765,15 +6006,13 @@ flow_create_split_metadata(struct rte_eth_dev *dev, const struct rte_flow_action_queue *queue; queue = qrss->conf; - if (mlx5_rxq_get_type(dev, queue->index) == - MLX5_RXQ_TYPE_HAIRPIN) + if (mlx5_rxq_is_hairpin(dev, queue->index)) qrss = NULL; } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { const struct rte_flow_action_rss *rss; rss = qrss->conf; - if (mlx5_rxq_get_type(dev, rss->queue[0]) == - MLX5_RXQ_TYPE_HAIRPIN) + if (mlx5_rxq_is_hairpin(dev, rss->queue[0])) qrss = NULL; } } @@ -6057,6 +6296,8 @@ flow_create_split_meter(struct rte_eth_dev *dev, fm->policy_id, NULL); MLX5_ASSERT(wks->policy); + if (wks->policy->mark) + wks->mark = 1; if (wks->policy->is_hierarchy) { wks->final_policy = mlx5_flow_meter_hierarchy_get_final_policy(dev, @@ -6837,6 +7078,15 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) { + 
rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "Flow non-Q creation not supported");
+		return NULL;
+	}
 	/*
 	 * If the device is not started yet, it is not allowed to create a
 	 * flow from application. PMD default flows and traffic control flows
@@ -6912,6 +7162,14 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 	uint32_t num_flushed = 0, fidx = 1;
 	struct rte_flow *flow;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (priv->sh->config.dv_flow_en == 2 &&
+	    type == MLX5_FLOW_TYPE_GEN) {
+		flow_hw_q_flow_flush(dev, NULL);
+		return;
+	}
+#endif
+
 	MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
 		flow_list_destroy(dev, type, fidx);
 		num_flushed++;
@@ -7333,6 +7591,13 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
 		  struct rte_flow_error *error __rte_unused)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (priv->sh->config.dv_flow_en == 2)
+		return rte_flow_error_set(error, ENOTSUP,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			  NULL,
+			  "Flow non-Q destruction not supported");
 	flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
 				(uintptr_t)(void *)flow);
 	return 0;
@@ -7430,7 +7695,13 @@ mlx5_flow_query(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
 	int ret;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
+	if (priv->sh->config.dv_flow_en == 2)
+		return rte_flow_error_set(error, ENOTSUP,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			  NULL,
+			  "Flow non-Q query not supported");
 	ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
			     error);
 	if (ret < 0)
@@ -7797,14 +8068,15 @@ mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
  */
 int
 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
-		   bool clear, uint64_t *pkts, uint64_t *bytes)
+		   bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
 {
 	const struct mlx5_flow_driver_ops *fops;
 	struct rte_flow_attr attr = { .transfer = 0 };
 
 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-		return fops->counter_query(dev, cnt, clear, pkts, bytes);
+		return fops->counter_query(dev, cnt, clear, pkts,
+					   bytes, action);
 	}
 	DRV_LOG(ERR,
 		"port %u counter query is not supported.",
@@ -7812,6 +8084,541 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
 	return -ENOTSUP;
 }
 
+/**
+ * Get information about HWS pre-configurable resources.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] port_info
+ *   Pointer to port information.
+ * @param[out] queue_info
+ *   Pointer to queue information.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+		   struct rte_flow_port_info *port_info,
+		   struct rte_flow_queue_info *queue_info,
+		   struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"info get with incorrect steering mode");
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->info_get(dev, port_info, queue_info, error);
+}
+
+/**
+ * Configure port HWS resources.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] port_attr
+ *   Port configuration attributes.
+ * @param[in] nb_queue
+ *   Number of queues.
+ * @param[in] queue_attr
+ *   Array that holds attributes for each flow queue.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+			 const struct rte_flow_port_attr *port_attr,
+			 uint16_t nb_queue,
+			 const struct rte_flow_queue_attr *queue_attr[],
+			 struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"port configure with incorrect steering mode");
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
+}
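For reference, these two callbacks sit behind the generic rte_flow_info_get() and rte_flow_configure() calls added in DPDK 22.03. A minimal application-side sketch follows; the queue depth, counter budget, and the setup_hw_steering() helper name are illustrative, not mlx5 requirements:

#include <rte_flow.h>

/* Query HWS limits, then pre-allocate one flow queue of 128 pending
 * operations and 64 counters. Must run before rte_eth_dev_start().
 */
static int
setup_hw_steering(uint16_t port_id)
{
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	struct rte_flow_error error;
	const struct rte_flow_port_attr port_attr = { .nb_counters = 64 };
	const struct rte_flow_queue_attr queue_attr = { .size = 128 };
	const struct rte_flow_queue_attr *attr_list[] = { &queue_attr };

	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error) != 0)
		return -1;
	if (queue_attr.size > queue_info.max_size)
		return -1;	/* Requested depth beyond the PMD maximum. */
	return rte_flow_configure(port_id, &port_attr, 1, attr_list, &error);
}

Note that mlx5 only takes the MLX5_FLOW_TYPE_HW path probed above when the dv_flow_en=2 device argument is set.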
+
+/**
+ * Create flow pattern template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the pattern template attributes.
+ * @param[in] items
+ *   The template item pattern.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Pattern template handle on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_pattern_template *
+mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
+		const struct rte_flow_pattern_template_attr *attr,
+		const struct rte_flow_item items[],
+		struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+		rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"pattern create with incorrect steering mode");
+		return NULL;
+	}
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->pattern_template_create(dev, attr, items, error);
+}
+
+/**
+ * Destroy flow pattern template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] template
+ *   Pointer to the pattern template to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
+				   struct rte_flow_pattern_template *template,
+				   struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"pattern destroy with incorrect steering mode");
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->pattern_template_destroy(dev, template, error);
+}
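An application-side sketch of pattern template creation, assuming the DPDK 22.03 attribute layout (later releases also add direction bits to the attributes); the ETH/IPV4/UDP shape and the make_pattern_template() helper are illustrative:

#include <rte_flow.h>

/* Template: match any ETH / IPV4 / UDP stack. The per-item masks, not
 * the specs, define what rules in the table will be able to match on.
 */
static struct rte_flow_pattern_template *
make_pattern_template(uint16_t port_id)
{
	const struct rte_flow_pattern_template_attr attr = {
		.relaxed_matching = 0,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .mask = &rte_flow_item_ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_pattern_template_create(port_id, &attr, pattern,
						&error);
}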
+
+/**
+ * Create flow actions template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the actions template attributes.
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] masks
+ *   List of actions marking which members of each action are constant.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Actions template handle on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_actions_template *
+mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
+			const struct rte_flow_actions_template_attr *attr,
+			const struct rte_flow_action actions[],
+			const struct rte_flow_action masks[],
+			struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+		rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"action create with incorrect steering mode");
+		return NULL;
+	}
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->actions_template_create(dev, attr, actions, masks, error);
+}
+
+/**
+ * Destroy flow actions template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] template
+ *   Pointer to the actions template to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
+				   struct rte_flow_actions_template *template,
+				   struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"action destroy with incorrect steering mode");
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->actions_template_destroy(dev, template, error);
+}
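A matching actions-template sketch: a QUEUE action whose mask entry carries a NULL conf, which leaves the target queue index as a per-flow argument supplied at rule insertion time (helper name illustrative, attributes zeroed since newer releases add direction bits):

#include <string.h>
#include <rte_flow.h>

static struct rte_flow_actions_template *
make_actions_template(uint16_t port_id)
{
	struct rte_flow_actions_template_attr attr;
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* In the masks list a NULL conf means "filled in per flow". */
	const struct rte_flow_action masks[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	memset(&attr, 0, sizeof(attr));	/* No attributes used here. */
	return rte_flow_actions_template_create(port_id, &attr, actions,
						masks, &error);
}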
+
+/**
+ * Create flow table.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the table attributes.
+ * @param[in] item_templates
+ *   Item template array to be bound to the table.
+ * @param[in] nb_item_templates
+ *   Number of item templates.
+ * @param[in] action_templates
+ *   Action template array to be bound to the table.
+ * @param[in] nb_action_templates
+ *   Number of action templates.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Table on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_template_table *
+mlx5_flow_table_create(struct rte_eth_dev *dev,
+		       const struct rte_flow_template_table_attr *attr,
+		       struct rte_flow_pattern_template *item_templates[],
+		       uint8_t nb_item_templates,
+		       struct rte_flow_actions_template *action_templates[],
+		       uint8_t nb_action_templates,
+		       struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+		rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"table create with incorrect steering mode");
+		return NULL;
+	}
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->template_table_create(dev,
+					   attr,
+					   item_templates,
+					   nb_item_templates,
+					   action_templates,
+					   nb_action_templates,
+					   error);
+}
+
+/**
+ * Destroy flow table.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] table
+ *   Pointer to the table to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_table_destroy(struct rte_eth_dev *dev,
+			struct rte_flow_template_table *table,
+			struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"table destroy with incorrect steering mode");
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->template_table_destroy(dev, table, error);
}
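Combining the two templates into a pre-sized table, sketched under the same assumptions; group 1 and the 1024-rule capacity are arbitrary example choices:

#include <rte_flow.h>

static struct rte_flow_template_table *
make_table(uint16_t port_id,
	   struct rte_flow_pattern_template *pt,
	   struct rte_flow_actions_template *at)
{
	const struct rte_flow_template_table_attr attr = {
		.flow_attr = { .group = 1, .ingress = 1 },
		.nb_flows = 1024,	/* Rule capacity reserved up front. */
	};
	struct rte_flow_error error;

	return rte_flow_template_table_create(port_id, &attr, &pt, 1,
					      &at, 1, &error);
}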
+
+/**
+ * Enqueue flow creation.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue_id
+ *   The queue to create the flow.
+ * @param[in] attr
+ *   Pointer to the flow operation attributes.
+ * @param[in] table
+ *   Pointer to the template table the flow is created from.
+ * @param[in] items
+ *   Items with flow spec values.
+ * @param[in] pattern_template_index
+ *   Index of the item pattern template to use from the table.
+ * @param[in] actions
+ *   Actions with flow spec values.
+ * @param[in] action_template_index
+ *   Index of the action template to use from the table.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Flow pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
+			    uint32_t queue_id,
+			    const struct rte_flow_op_attr *attr,
+			    struct rte_flow_template_table *table,
+			    const struct rte_flow_item items[],
+			    uint8_t pattern_template_index,
+			    const struct rte_flow_action actions[],
+			    uint8_t action_template_index,
+			    void *user_data,
+			    struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+		rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"flow_q create with incorrect steering mode");
+		return NULL;
+	}
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->async_flow_create(dev, queue_id, attr, table,
+				       items, pattern_template_index,
+				       actions, action_template_index,
+				       user_data, error);
+}
+
+/**
+ * Enqueue flow destruction.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to destroy the flow.
+ * @param[in] attr
+ *   Pointer to the flow operation attributes.
+ * @param[in] flow
+ *   Pointer to the flow to be destroyed.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
+			     uint32_t queue,
+			     const struct rte_flow_op_attr *attr,
+			     struct rte_flow *flow,
+			     void *user_data,
+			     struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"flow_q destroy with incorrect steering mode");
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->async_flow_destroy(dev, queue, attr, flow,
+					user_data, error);
+}
+
+/**
+ * Pull the enqueued flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to pull the result.
+ * @param[in/out] res
+ *   Array to save the results.
+ * @param[in] n_res
+ *   Number of result slots available in the array.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Number of results on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_pull(struct rte_eth_dev *dev,
+	       uint32_t queue,
+	       struct rte_flow_op_result res[],
+	       uint16_t n_res,
+	       struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"flow_q pull with incorrect steering mode");
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->pull(dev, queue, res, n_res, error);
+}
+
+/**
+ * Push the enqueued flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to push the flows.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_push(struct rte_eth_dev *dev,
+	       uint32_t queue,
+	       struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"flow_q push with incorrect steering mode");
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->push(dev, queue, error);
+}
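An end-to-end enqueue/push/pull sketch against the table from the previous sketches; the destination address, flow queue 0, and the busy-wait completion loop are illustrative simplifications:

#include <rte_flow.h>
#include <rte_ip.h>

static struct rte_flow *
insert_rule(uint16_t port_id, struct rte_flow_template_table *table)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	const struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_op_result result;
	struct rte_flow_error error;
	struct rte_flow *flow;
	int n;

	/* Template index 0 on both sides: the only templates bound above. */
	flow = rte_flow_async_create(port_id, 0, &op_attr, table,
				     pattern, 0, actions, 0, NULL, &error);
	if (flow == NULL)
		return NULL;
	rte_flow_push(port_id, 0, &error);
	do {
		n = rte_flow_pull(port_id, 0, &result, 1, &error);
	} while (n == 0);	/* Spin until the one completion lands. */
	if (n < 0 || result.status != RTE_FLOW_OP_SUCCESS)
		return NULL;
	return flow;
}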
+
+/**
+ * Create shared action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to be used.
+ * @param[in] attr
+ *   Operation attribute.
+ * @param[in] conf
+ *   Indirect action configuration.
+ * @param[in] action
+ *   rte_flow action detail.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Action handle on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_action_handle *
+mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+				     const struct rte_flow_op_attr *attr,
+				     const struct rte_flow_indir_action_conf *conf,
+				     const struct rte_flow_action *action,
+				     void *user_data,
+				     struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+
+	return fops->async_action_create(dev, queue, attr, conf, action,
+					 user_data, error);
+}
+
+/**
+ * Update shared action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to be used.
+ * @param[in] attr
+ *   Operation attribute.
+ * @param[in] handle
+ *   Action handle to be updated.
+ * @param[in] update
+ *   Update value.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
+				     const struct rte_flow_op_attr *attr,
+				     struct rte_flow_action_handle *handle,
+				     const void *update,
+				     void *user_data,
+				     struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+
+	return fops->async_action_update(dev, queue, attr, handle,
+					 update, user_data, error);
+}
+
+/**
+ * Destroy shared action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to be used.
+ * @param[in] attr
+ *   Operation attribute.
+ * @param[in] handle
+ *   Action handle to be destroyed.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
+				      const struct rte_flow_op_attr *attr,
+				      struct rte_flow_action_handle *handle,
+				      void *user_data,
+				      struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+
+	return fops->async_action_destroy(dev, queue, attr, handle,
+					  user_data, error);
+}
+
 /**
  * Allocate a new memory for the counter values wrapped by all the needed
  * management.
@@ -8248,7 +9055,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 		struct rte_flow *flow;
 		struct rte_flow_error error;
 
-		if (!priv->config.dv_flow_en)
+		if (!priv->sh->config.dv_flow_en)
 			break;
 		/* Create internal flow, validation skips copy action. */
 		flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
@@ -8376,6 +9183,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
 				"invalid flow handle");
 	}
 	handle_idx = flow->dev_handles;
+	/* query counter */
+	if (flow->counter &&
+	    (!mlx5_counter_query(dev, flow->counter, false,
+				 &count.hits, &count.bytes, &action)) && action) {
+		id = (uint64_t)(uintptr_t)action;
+		type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+		save_dump_file(NULL, 0, type,
+			       id, (void *)&count, file);
+	}
+
 	while (handle_idx) {
 		dh = mlx5_ipool_get(priv->sh->ipool
 				[MLX5_IPOOL_MLX5_FLOW], handle_idx);
@@ -8383,16 +9200,6 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
 			continue;
 		handle_idx = dh->next.next;
-		/* query counter */
-		type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-		flow_dv_query_count_ptr(dev, flow->counter,
-						&action, error);
-		if (action) {
-			id = (uint64_t)(uintptr_t)action;
-			if (!mlx5_flow_query_counter(dev, flow, &count, error))
-				save_dump_file(NULL, 0, type,
-					       id, (void *)&count, file);
-		}
 		/* Get modify_hdr and encap_decap buf from ipools.
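The async indirect-action trio follows the same flow-queue discipline; a sketch creating a shared counter on flow queue 0, with completion handling identical to rule insertion (COUNT is chosen only as a familiar indirect action type):

#include <rte_flow.h>

static struct rte_flow_action_handle *
make_shared_counter(uint16_t port_id)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_op_result result;
	struct rte_flow_error error;
	struct rte_flow_action_handle *handle;
	int n;

	handle = rte_flow_async_action_handle_create(port_id, 0, &op_attr,
						     &conf, &action, NULL,
						     &error);
	if (handle == NULL)
		return NULL;
	rte_flow_push(port_id, 0, &error);
	do {
		n = rte_flow_pull(port_id, 0, &result, 1, &error);
	} while (n == 0);	/* Wait for the enqueued create to complete. */
	return (n > 0 && result.status == RTE_FLOW_OP_SUCCESS) ? handle : NULL;
}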
*/ encap_decap = NULL; modify_hdr = dh->dvh.modify_hdr; @@ -8438,7 +9245,7 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, */ static int mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, - FILE *file, struct rte_flow_error *error) + FILE *file, struct rte_flow_error *error __rte_unused) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; @@ -8523,14 +9330,12 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, max = MLX5_COUNTERS_PER_POOL * cmng->n_valid; for (j = 1; j <= max; j++) { action = NULL; - flow_dv_query_count_ptr(dev, j, &action, error); - if (action) { - if (!flow_dv_query_count(dev, j, &count, error)) { - type = DR_DUMP_REC_TYPE_PMD_COUNTER; - id = (uint64_t)(uintptr_t)action; - save_dump_file(NULL, 0, type, - id, (void *)&count, file); - } + if ((!mlx5_counter_query(dev, j, false, &count.hits, + &count.bytes, &action)) && action) { + id = (uint64_t)(uintptr_t)action; + type = DR_DUMP_REC_TYPE_PMD_COUNTER; + save_dump_file(NULL, 0, type, + id, (void *)&count, file); } } return 0; @@ -8562,7 +9367,7 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, struct mlx5_flow_handle *dh; struct rte_flow *flow; - if (!priv->config.dv_flow_en) { + if (!sh->config.dv_flow_en) { if (fputs("device dv flow disabled\n", file) <= 0) return -errno; return -ENOTSUP; @@ -8886,14 +9691,10 @@ int mlx5_action_handle_attach(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_indexed_pool *ipool = - priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS]; - struct mlx5_shared_action_rss *shared_rss, *shared_rss_last; int ret = 0; - uint32_t idx; + struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last; - ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { - struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { const char *message; uint32_t queue_idx; @@ -8909,9 +9710,7 @@ mlx5_action_handle_attach(struct rte_eth_dev *dev) } if (ret != 0) return ret; - ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { - struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; - + LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { ret = mlx5_ind_table_obj_attach(dev, ind_tbl); if (ret != 0) { DRV_LOG(ERR, "Port %u could not attach " @@ -8920,13 +9719,12 @@ mlx5_action_handle_attach(struct rte_eth_dev *dev) goto error; } } + return 0; error: - shared_rss_last = shared_rss; - ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { - struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; - - if (shared_rss == shared_rss_last) + ind_tbl_last = ind_tbl; + LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { + if (ind_tbl == ind_tbl_last) break; if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0) DRV_LOG(CRIT, "Port %u could not detach " @@ -8949,15 +9747,10 @@ int mlx5_action_handle_detach(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_indexed_pool *ipool = - priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS]; - struct mlx5_shared_action_rss *shared_rss, *shared_rss_last; int ret = 0; - uint32_t idx; - - ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { - struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last; + LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { ret = mlx5_ind_table_obj_detach(dev, ind_tbl); if (ret != 0) { DRV_LOG(ERR, "Port %u could not detach " @@ -8968,11 
+9761,9 @@ mlx5_action_handle_detach(struct rte_eth_dev *dev) } return 0; error: - shared_rss_last = shared_rss; - ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { - struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; - - if (shared_rss == shared_rss_last) + ind_tbl_last = ind_tbl; + LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { + if (ind_tbl == ind_tbl_last) break; if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0) DRV_LOG(CRIT, "Port %u could not attach " @@ -9546,7 +10337,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - if (!priv->config.dv_flow_en) + if (!priv->sh->config.dv_flow_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "flow DV interface is off"); @@ -9965,7 +10756,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev) type = mlx5_flow_os_get_type(); if (type == MLX5_FLOW_TYPE_MAX) { type = MLX5_FLOW_TYPE_VERBS; - if (priv->sh->cdev->config.devx && priv->config.dv_flow_en) + if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en) type = MLX5_FLOW_TYPE_DV; } fops = flow_get_drv_ops(type);