diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index b6e50b14e1..f5d98d267b 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -54,8 +54,6 @@ #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1 #endif -#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \ - sizeof(struct rte_flow_item_ipv4)) /* VLAN header definitions */ #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT) @@ -85,16 +83,37 @@ union flow_dv_attr { * Pointer to item specification. * @param[out] attr * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. * @param[in] tunnel_decap * Whether action is after tunnel decapsulation. */ static void flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, - bool tunnel_decap) + struct mlx5_flow *dev_flow, bool tunnel_decap) { + uint64_t layers = dev_flow->handle->layers; + + /* + * If layers is already initialized, it means this dev_flow is a + * suffix flow and the layer flags were set by the prefix flow. Use + * the layer flags from the prefix flow, as the suffix flow may not + * carry the user-defined items after the flow was split. + */ + if (layers) { + if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) + attr->ipv4 = 1; + else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6) + attr->ipv6 = 1; + if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP) + attr->tcp = 1; + else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP) + attr->udp = 1; + attr->valid = 1; + return; + } for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { uint8_t next_protocol = 0xff; - switch (item->type) { case RTE_FLOW_ITEM_TYPE_GRE: case RTE_FLOW_ITEM_TYPE_NVGRE: @@ -640,6 +659,8 @@ flow_dv_convert_action_modify_vlan_vid * Pointer to rte_flow_item objects list. * @param[in] attr * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. * @param[in] tunnel_decap * Whether action is after tunnel decapsulation. * @param[out] error @@ -653,8 +674,8 @@ flow_dv_convert_action_modify_tp (struct mlx5_flow_dv_modify_hdr_resource *resource, const struct rte_flow_action *action, const struct rte_flow_item *items, - union flow_dv_attr *attr, bool tunnel_decap, - struct rte_flow_error *error) + union flow_dv_attr *attr, struct mlx5_flow *dev_flow, + bool tunnel_decap, struct rte_flow_error *error) { const struct rte_flow_action_set_tp *conf = (const struct rte_flow_action_set_tp *)(action->conf); @@ -666,7 +687,7 @@ flow_dv_convert_action_modify_tp struct field_modify_info *field; if (!attr->valid) - flow_dv_attr_init(items, attr, tunnel_decap); + flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); if (attr->udp) { memset(&udp, 0, sizeof(udp)); memset(&udp_mask, 0, sizeof(udp_mask)); @@ -683,8 +704,8 @@ flow_dv_convert_action_modify_tp item.spec = &udp; item.mask = &udp_mask; field = modify_udp; - } - if (attr->tcp) { + } else { + MLX5_ASSERT(attr->tcp); memset(&tcp, 0, sizeof(tcp)); memset(&tcp_mask, 0, sizeof(tcp_mask)); if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { @@ -716,6 +737,8 @@ flow_dv_convert_action_modify_tp * Pointer to rte_flow_item objects list. * @param[in] attr * Pointer to flow attributes structure. 
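Why flow_dv_attr_init() gains the dev_flow argument: after a flow is split into prefix and suffix sub-flows (for example by the meter or metadata-register split), the suffix flow's item list may carry only internal TAG items, so scanning items[] can no longer tell IPv4 from IPv6 or TCP from UDP. A minimal sketch of the derivation, assuming the MLX5_FLOW_LAYER_* flags and the union flow_dv_attr bit-fields declared earlier in this file:

	/*
	 * Sketch: a suffix flow inherits the protocol knowledge recorded by
	 * the prefix flow in handle->layers instead of re-parsing items.
	 *   user flow:   eth / ipv4 / udp -> set_tp_dst / queue
	 *   suffix flow: tag (internal)   -> set_tp_dst / queue
	 */
	uint64_t layers = dev_flow->handle->layers;
	union flow_dv_attr attr = { .attr = 0 };

	if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		attr.ipv4 = 1;	/* ipv4/ipv6 are mutually exclusive */
	if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
		attr.udp = 1;	/* so are tcp/udp, hence the else/assert */
	attr.valid = 1;

This is also why the converters can replace the second "if (attr->tcp)" with "else { MLX5_ASSERT(attr->tcp); }": once attr is valid, exactly one L4 (and one L3) bit can be set.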
+ * @param[in] dev_flow + * Pointer to the sub flow. * @param[in] tunnel_decap * Whether action is after tunnel decapsulation. * @param[out] error @@ -729,8 +752,8 @@ flow_dv_convert_action_modify_ttl (struct mlx5_flow_dv_modify_hdr_resource *resource, const struct rte_flow_action *action, const struct rte_flow_item *items, - union flow_dv_attr *attr, bool tunnel_decap, - struct rte_flow_error *error) + union flow_dv_attr *attr, struct mlx5_flow *dev_flow, + bool tunnel_decap, struct rte_flow_error *error) { const struct rte_flow_action_set_ttl *conf = (const struct rte_flow_action_set_ttl *)(action->conf); @@ -742,7 +765,7 @@ flow_dv_convert_action_modify_ttl struct field_modify_info *field; if (!attr->valid) - flow_dv_attr_init(items, attr, tunnel_decap); + flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); if (attr->ipv4) { memset(&ipv4, 0, sizeof(ipv4)); memset(&ipv4_mask, 0, sizeof(ipv4_mask)); @@ -752,8 +775,8 @@ flow_dv_convert_action_modify_ttl item.spec = &ipv4; item.mask = &ipv4_mask; field = modify_ipv4; - } - if (attr->ipv6) { + } else { + MLX5_ASSERT(attr->ipv6); memset(&ipv6, 0, sizeof(ipv6)); memset(&ipv6_mask, 0, sizeof(ipv6_mask)); ipv6.hdr.hop_limits = conf->ttl_value; @@ -778,6 +801,8 @@ flow_dv_convert_action_modify_ttl * Pointer to rte_flow_item objects list. * @param[in] attr * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. * @param[in] tunnel_decap * Whether action is after tunnel decapsulation. * @param[out] error @@ -790,8 +815,8 @@ static int flow_dv_convert_action_modify_dec_ttl (struct mlx5_flow_dv_modify_hdr_resource *resource, const struct rte_flow_item *items, - union flow_dv_attr *attr, bool tunnel_decap, - struct rte_flow_error *error) + union flow_dv_attr *attr, struct mlx5_flow *dev_flow, + bool tunnel_decap, struct rte_flow_error *error) { struct rte_flow_item item; struct rte_flow_item_ipv4 ipv4; @@ -801,7 +826,7 @@ flow_dv_convert_action_modify_dec_ttl struct field_modify_info *field; if (!attr->valid) - flow_dv_attr_init(items, attr, tunnel_decap); + flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); if (attr->ipv4) { memset(&ipv4, 0, sizeof(ipv4)); memset(&ipv4_mask, 0, sizeof(ipv4_mask)); @@ -811,8 +836,8 @@ flow_dv_convert_action_modify_dec_ttl item.spec = &ipv4; item.mask = &ipv4_mask; field = modify_ipv4; - } - if (attr->ipv6) { + } else { + MLX5_ASSERT(attr->ipv6); memset(&ipv6, 0, sizeof(ipv6)); memset(&ipv6_mask, 0, sizeof(ipv6_mask)); ipv6.hdr.hop_limits = 0xFF; @@ -1383,6 +1408,11 @@ flow_dv_validate_item_mark(struct rte_eth_dev *dev, "mark id exceeds the limit"); if (!mask) mask = &nic_mask; + if (!mask->id) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "mask cannot be zero"); + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_mark), @@ -1428,10 +1458,6 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, RTE_FLOW_ERROR_TYPE_ITEM_SPEC, item->spec, "data cannot be empty"); - if (!spec->data) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, - "data cannot be zero"); if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { if (!mlx5_flow_ext_mreg_supported(dev)) return rte_flow_error_set(error, ENOTSUP, @@ -1451,6 +1477,11 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, } if (!mask) mask = &rte_flow_item_meta_mask; + if (!mask->data) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "mask 
cannot be zero"); + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_meta), @@ -1499,6 +1530,11 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, "data cannot be empty"); if (!mask) mask = &rte_flow_item_tag_mask; + if (!mask->data) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "mask cannot be zero"); + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_tag), @@ -1675,7 +1711,7 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error) { - struct mlx5_priv *priv = dev->data->dev_private; + const struct mlx5_priv *priv = dev->data->dev_private; (void)action; (void)attr; @@ -1706,6 +1742,11 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "wrong action order, port_id should " "be after pop VLAN action"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "pop vlan action for VF representor " + "not supported on NIC table"); return 0; } @@ -1732,10 +1773,14 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, if (items == NULL) return; - for (; items->type != RTE_FLOW_ITEM_TYPE_END && - items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++) - ; - if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) { + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int type = items->type; + + if (type == RTE_FLOW_ITEM_TYPE_VLAN || + type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN) + break; + } + if (items->type != RTE_FLOW_ITEM_TYPE_END) { const struct rte_flow_item_vlan *vlan_m = items->mask; const struct rte_flow_item_vlan *vlan_v = items->spec; @@ -1744,7 +1789,7 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, /* Only full match values are accepted */ if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) == MLX5DV_FLOW_VLAN_PCP_MASK_BE) { - vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK; + vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK; vlan->vlan_tci |= rte_be_to_cpu_16(vlan_v->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE); @@ -1765,6 +1810,8 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, /** * Validate the push VLAN action. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] action_flags * Holds the actions detected until now. * @param[in] item_flags @@ -1780,13 +1827,15 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -flow_dv_validate_action_push_vlan(uint64_t action_flags, +flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, + uint64_t action_flags, uint64_t item_flags __rte_unused, const struct rte_flow_action *action, const struct rte_flow_attr *attr, struct rte_flow_error *error) { const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; + const struct mlx5_priv *priv = dev->data->dev_private; if (!attr->transfer && attr->ingress) return rte_flow_error_set(error, ENOTSUP, @@ -1809,6 +1858,11 @@ flow_dv_validate_action_push_vlan(uint64_t action_flags, RTE_FLOW_ERROR_TYPE_ACTION, action, "wrong action order, port_id should " "be after push VLAN"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "push vlan action for VF representor " + "not supported on NIC table"); (void)attr; return 0; } @@ -2082,10 +2136,6 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "meta data must be within reg C0"); - if (!(conf->data & conf->mask)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, action, - "zero value has no effect"); return 0; } @@ -2178,10 +2228,14 @@ notsup_err: /** * Validate the L2 encap action. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] action_flags * Holds the actions detected until now. * @param[in] action * Pointer to the action structure. + * @param[in] attr + * Pointer to flow attributes. * @param[out] error * Pointer to error structure. * @@ -2189,10 +2243,14 @@ notsup_err: * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -flow_dv_validate_action_l2_encap(uint64_t action_flags, +flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev, + uint64_t action_flags, const struct rte_flow_action *action, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { + const struct mlx5_priv *priv = dev->data->dev_private; + if (!(action->conf)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, @@ -2202,12 +2260,19 @@ flow_dv_validate_action_l2_encap(uint64_t action_flags, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can only have a single encap action " "in a flow"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "encap action for VF representor " + "not supported on NIC table"); return 0; } /** * Validate a decap action. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] action_flags * Holds the actions detected until now. * @param[in] attr @@ -2219,10 +2284,13 @@ flow_dv_validate_action_l2_encap(uint64_t action_flags, * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -flow_dv_validate_action_decap(uint64_t action_flags, - const struct rte_flow_attr *attr, - struct rte_flow_error *error) +flow_dv_validate_action_decap(struct rte_eth_dev *dev, + uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) { + const struct mlx5_priv *priv = dev->data->dev_private; + if (action_flags & MLX5_FLOW_XCAP_ACTIONS) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -2241,6 +2309,11 @@ flow_dv_validate_action_decap(uint64_t action_flags, NULL, "decap action not supported for " "egress"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "decap action for VF representor " + "not supported on NIC table"); return 0; } @@ -2249,6 +2322,8 @@ const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; /** * Validate the raw encap and decap actions. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] decap * Pointer to the decap action. * @param[in] encap @@ -2267,11 +2342,13 @@ const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; */ static int flow_dv_validate_action_raw_encap_decap - (const struct rte_flow_action_raw_decap *decap, + (struct rte_eth_dev *dev, + const struct rte_flow_action_raw_decap *decap, const struct rte_flow_action_raw_encap *encap, const struct rte_flow_attr *attr, uint64_t *action_flags, int *actions_n, struct rte_flow_error *error) { + const struct mlx5_priv *priv = dev->data->dev_private; int ret; if (encap && (!encap->size || !encap->data)) @@ -2304,7 +2381,8 @@ flow_dv_validate_action_raw_encap_decap "encap combination"); } if (decap) { - ret = flow_dv_validate_action_decap(*action_flags, attr, error); + ret = flow_dv_validate_action_decap(dev, *action_flags, attr, + error); if (ret < 0) return ret; *action_flags |= MLX5_FLOW_ACTION_DECAP; @@ -2321,6 +2399,12 @@ flow_dv_validate_action_raw_encap_decap RTE_FLOW_ERROR_TYPE_ACTION, NULL, "more than one encap action"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "encap action for VF representor " + "not supported on NIC table"); *action_flags |= MLX5_FLOW_ACTION_ENCAP; ++(*actions_n); } @@ -2354,7 +2438,7 @@ flow_dv_encap_decap_resource_register struct mlx5_flow_dv_encap_decap_resource *cache_resource; struct mlx5dv_dr_domain *domain; - resource->flags = dev_flow->group ? 0 : 1; + resource->flags = dev_flow->dv.group ? 
0 : 1; if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) domain = sh->fdb_domain; else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) @@ -2374,7 +2458,7 @@ flow_dv_encap_decap_resource_register (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.encap_decap = cache_resource; + dev_flow->handle->dvh.encap_decap = cache_resource; return 0; } } @@ -2400,7 +2484,7 @@ flow_dv_encap_decap_resource_register rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next); - dev_flow->dv.encap_decap = cache_resource; + dev_flow->handle->dvh.encap_decap = cache_resource; DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -2451,7 +2535,7 @@ flow_dv_jump_tbl_resource_register (void *)&tbl_data->jump, cnt); } rte_atomic32_inc(&tbl_data->jump.refcnt); - dev_flow->dv.jump = &tbl_data->jump; + dev_flow->handle->dvh.jump = &tbl_data->jump; return 0; } @@ -2489,7 +2573,7 @@ flow_dv_port_id_action_resource_register (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.port_id_action = cache_resource; + dev_flow->handle->dvh.port_id_action = cache_resource; return 0; } } @@ -2517,7 +2601,7 @@ flow_dv_port_id_action_resource_register rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next); - dev_flow->dv.port_id_action = cache_resource; + dev_flow->handle->dvh.port_id_action = cache_resource; DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -2560,7 +2644,7 @@ flow_dv_push_vlan_action_resource_register (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.push_vlan_res = cache_resource; + dev_flow->handle->dvh.push_vlan_res = cache_resource; return 0; } } @@ -2589,7 +2673,7 @@ flow_dv_push_vlan_action_resource_register rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next); - dev_flow->dv.push_vlan_res = cache_resource; + dev_flow->handle->dvh.push_vlan_res = cache_resource; DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -3676,8 +3760,8 @@ flow_dv_modify_hdr_resource_register struct mlx5dv_dr_domain *ns; uint32_t actions_len; - resource->flags = - dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; + resource->flags = dev_flow->dv.group ? 
0 : + MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; if (resource->actions_num > flow_dv_modify_hdr_action_max(dev, resource->flags)) return rte_flow_error_set(error, EOVERFLOW, @@ -3702,7 +3786,7 @@ flow_dv_modify_hdr_resource_register (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.modify_hdr = cache_resource; + dev_flow->handle->dvh.modify_hdr = cache_resource; return 0; } } @@ -3729,7 +3813,7 @@ flow_dv_modify_hdr_resource_register rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next); - dev_flow->dv.modify_hdr = cache_resource; + dev_flow->handle->dvh.modify_hdr = cache_resource; DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -3939,6 +4023,7 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) mkey_attr.pg_access = 0; mkey_attr.klm_array = NULL; mkey_attr.klm_num = 0; + mkey_attr.relaxed_ordering = 1; mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); if (!mem_mng->dm) { mlx5_glue->devx_umem_dereg(mem_mng->umem); @@ -4463,13 +4548,35 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_action_raw_decap *decap; const struct rte_flow_action_raw_encap *encap; const struct rte_flow_action_rss *rss; - struct rte_flow_item_tcp nic_tcp_mask = { + const struct rte_flow_item_tcp nic_tcp_mask = { .hdr = { .tcp_flags = 0xFF, .src_port = RTE_BE16(UINT16_MAX), .dst_port = RTE_BE16(UINT16_MAX), } }; + const struct rte_flow_item_ipv4 nic_ipv4_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .next_proto_id = 0xff, + .time_to_live = 0xff, + }, + }; + const struct rte_flow_item_ipv6 nic_ipv6_mask = { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .vtc_flow = RTE_BE32(0xffffffff), + .proto = 0xff, + .hop_limits = 0xff, + }, + }; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *dev_conf = &priv->config; uint16_t queue_index = 0xFFFF; @@ -4536,7 +4643,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, &item_flags, &tunnel); ret = mlx5_flow_validate_item_ipv4(items, item_flags, last_item, - ether_type, NULL, + ether_type, + &nic_ipv4_mask, error); if (ret < 0) return ret; @@ -4561,7 +4669,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, &item_flags, &tunnel); ret = mlx5_flow_validate_item_ipv6(items, item_flags, last_item, - ether_type, NULL, + ether_type, + &nic_ipv6_mask, error); if (ret < 0) return ret; @@ -4841,7 +4950,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, ++actions_n; break; case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: - ret = flow_dv_validate_action_push_vlan(action_flags, + ret = flow_dv_validate_action_push_vlan(dev, + action_flags, item_flags, actions, attr, error); @@ -4869,8 +4979,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: - ret = flow_dv_validate_action_l2_encap(action_flags, - actions, error); + ret = flow_dv_validate_action_l2_encap(dev, + action_flags, + actions, attr, + error); if (ret < 0) return ret; action_flags 
|= MLX5_FLOW_ACTION_ENCAP; @@ -4878,8 +4990,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: - ret = flow_dv_validate_action_decap(action_flags, attr, - error); + ret = flow_dv_validate_action_decap(dev, action_flags, + attr, error); if (ret < 0) return ret; action_flags |= MLX5_FLOW_ACTION_DECAP; @@ -4887,7 +4999,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: ret = flow_dv_validate_action_raw_encap_decap - (NULL, actions->conf, attr, &action_flags, + (dev, NULL, actions->conf, attr, &action_flags, &actions_n, error); if (ret < 0) return ret; @@ -4903,7 +5015,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, encap = actions->conf; } ret = flow_dv_validate_action_raw_encap_decap - (decap ? decap : &empty_decap, encap, + (dev, + decap ? decap : &empty_decap, encap, attr, &action_flags, &actions_n, error); if (ret < 0) @@ -5158,6 +5271,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * Internal preparation function. Allocates the DV flow size, * this size is constant. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] attr * Pointer to the flow attributes. * @param[in] items * @@ -5172,24 +5287,43 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * otherwise NULL and rte_errno is set. */ static struct mlx5_flow * -flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused, +flow_dv_prepare(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, struct rte_flow_error *error) { - size_t size = sizeof(struct mlx5_flow); + size_t size = sizeof(struct mlx5_flow_handle); struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dev_handle; + struct mlx5_priv *priv = dev->data->dev_private; - dev_flow = rte_calloc(__func__, 1, size, 0); - if (!dev_flow) { + /* Guard against overflow of the pre-allocated device flow array. */ + if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + rte_flow_error_set(error, ENOSPC, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "no free temporary device flow"); + return NULL; + } + dev_handle = rte_calloc(__func__, 1, size, 0); + if (!dev_handle) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "not enough memory to create flow"); + "not enough memory to create flow handle"); return NULL; } + /* Multi-threaded flow creation is not supported yet. */ + dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + dev_flow->handle = dev_handle; dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param); + /* + * The matching value needs to be cleared to 0 before use. Previously + * it was cleared implicitly by the rte_*alloc API; the explicit + * memset takes almost the same time. + */ + memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param)); dev_flow->ingress = attr->ingress; - dev_flow->transfer = attr->transfer; + dev_flow->dv.transfer = attr->transfer; return dev_flow; } @@ -5279,19 +5413,27 @@ flow_dv_translate_item_eth(void *matcher, void *key, /* The value must be in the range of the mask. 
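The reworked flow_dv_prepare() replaces one rte_calloc() of a whole struct mlx5_flow per sub-flow with a slot from a pre-allocated per-port scratch array; only the small, persistent mlx5_flow_handle is still heap-allocated. A minimal sketch of the assumed layout; priv->inter_flows, priv->flow_idx and MLX5_NUM_MAX_DEV_FLOWS appear in the patch, the surrounding structure shape is an assumption:

	/*
	 * Assumed per-port scratch space, reset at each flow creation:
	 * inter_flows points at MLX5_NUM_MAX_DEV_FLOWS mlx5_flow entries,
	 * flow_idx is the next free slot (single-threaded by design).
	 */
	struct mlx5_flow *dev_flow =
		&((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
	dev_flow->handle = dev_handle;	/* handle outlives the scratch entry */
	/* Scratch entries are reused, so the match value must be zeroed. */
	memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));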
*/ for (i = 0; i < sizeof(eth_m->dst); ++i) l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i]; - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, - rte_be_to_cpu_16(eth_m->type)); - l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype); - *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; if (eth_v->type) { /* When ethertype is present set mask for tagged VLAN. */ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); /* Set value for tagged VLAN if ethertype is 802.1Q. */ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) || - eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) + eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) { MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); + /* Return here to avoid setting match on ethertype. */ + return; + } } + /* + * HW supports match on one Ethertype, the Ethertype following the last + * VLAN tag of the packet (see PRM). + * Set match on ethertype only if ETH header is not followed by VLAN. + */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, + rte_be_to_cpu_16(eth_m->type)); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype); + *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; } /** @@ -5337,7 +5479,7 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * This is workaround, masks are not supported, * and pre-validated. */ - dev_flow->dv.vf_vlan.tag = + dev_flow->handle->vf_vlan.tag = rte_be_to_cpu_16(vlan_v->tci) & 0x0fff; } tci_m = rte_be_to_cpu_16(vlan_m->tci); @@ -5365,6 +5507,8 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, * Flow matcher value. * @param[in] item * Flow pattern to translate. + * @param[in] item_flags + * Bit-fields that hold the items detected until now. * @param[in] inner * Item is inner pattern. * @param[in] group @@ -5373,6 +5517,7 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, static void flow_dv_translate_item_ipv4(void *matcher, void *key, const struct rte_flow_item *item, + const uint64_t item_flags, int inner, uint32_t group) { const struct rte_flow_item_ipv4 *ipv4_m = item->mask; @@ -5383,6 +5528,7 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, .dst_addr = RTE_BE32(0xffffffff), .type_of_service = 0xff, .next_proto_id = 0xff, + .time_to_live = 0xff, }, }; void *headers_m; @@ -5405,6 +5551,13 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, else MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4); + /* + * On outer header (which must contain L2), or inner header with L2, + * set cvlan_tag mask bit to mark this packet as untagged. + * This should be done even if item->spec is empty. + */ + if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) + MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); if (!ipv4_v) return; if (!ipv4_m) @@ -5432,7 +5585,10 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, ipv4_m->hdr.next_proto_id); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, + ipv4_m->hdr.time_to_live); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, + ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live); } /** @@ -5444,6 +5600,8 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, * Flow matcher value. * @param[in] item * Flow pattern to translate. + * @param[in] item_flags + * Bit-fields that hold the items detected until now. 
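The ethertype reordering above encodes a hardware constraint: the device matches a single ethertype, the one following the last VLAN tag (see the PRM note in the new comment). A sketch of the resulting matcher programming, assuming the MLX5_SET() accessors used throughout this file:

	/* eth(type=0x8100) / vlan / ipv4: only tag presence is matched
	 * here; the 0x8100 value itself must not be programmed, the VLAN
	 * item later supplies the post-VLAN ethertype. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

	/* eth(type=0x0800) / ipv4: no VLAN follows, so the ethertype can
	 * be matched directly. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0xffff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0x0800);

The cvlan_tag change in the IPv4/IPv6 translators works toward the same goal from the other side: an L3 item that directly follows L2 now always sets the cvlan_tag mask bit to pin the packet as untagged, even when item->spec is empty.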
* @param[in] inner * Item is inner pattern. * @param[in] group @@ -5452,6 +5610,7 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, static void flow_dv_translate_item_ipv6(void *matcher, void *key, const struct rte_flow_item *item, + const uint64_t item_flags, int inner, uint32_t group) { const struct rte_flow_item_ipv6 *ipv6_m = item->mask; @@ -5494,6 +5653,13 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, else MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6); + /* + * On outer header (which must contain L2), or inner header with L2, + * set cvlan_tag mask bit to mark this packet as untagged. + * This should be done even if item->spec is empty. + */ + if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) + MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); if (!ipv6_v) return; if (!ipv6_m) @@ -5537,7 +5703,11 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, ipv6_m->hdr.proto); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, ipv6_v->hdr.proto & ipv6_m->hdr.proto); - MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); + /* Hop limit. */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, + ipv6_m->hdr.hop_limits); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, + ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits); } /** @@ -6810,7 +6980,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, (void *)cache_matcher, rte_atomic32_read(&cache_matcher->refcnt)); rte_atomic32_inc(&cache_matcher->refcnt); - dev_flow->dv.matcher = cache_matcher; + dev_flow->handle->dvh.matcher = cache_matcher; /* old matcher should not make the table ref++. */ flow_dv_tbl_resource_release(dev, tbl); return 0; @@ -6847,7 +7017,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, /* only matcher ref++, table ref++ already done above in get API. */ rte_atomic32_inc(&cache_matcher->refcnt); LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); - dev_flow->dv.matcher = cache_matcher; + dev_flow->handle->dvh.matcher = cache_matcher; DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", key->domain ? 
"FDB" : "NIC", key->table_id, cache_matcher->priority, @@ -6889,7 +7059,7 @@ flow_dv_tag_resource_register cache_resource = container_of (entry, struct mlx5_flow_dv_tag_resource, entry); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.tag_resource = cache_resource; + dev_flow->handle->dvh.tag_resource = cache_resource; DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -6918,7 +7088,7 @@ flow_dv_tag_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot insert tag"); } - dev_flow->dv.tag_resource = cache_resource; + dev_flow->handle->dvh.tag_resource = cache_resource; DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -7063,7 +7233,7 @@ static void flow_dv_hashfields_set(struct mlx5_flow *dev_flow) { struct rte_flow *flow = dev_flow->flow; - uint64_t items = dev_flow->layers; + uint64_t items = dev_flow->handle->layers; int rss_inner = 0; uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types); @@ -7153,6 +7323,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *dev_conf = &priv->config; struct rte_flow *flow = dev_flow->flow; + struct mlx5_flow_handle *handle = dev_flow->handle; uint64_t item_flags = 0; uint64_t last_item = 0; uint64_t action_flags = 0; @@ -7188,7 +7359,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, !!priv->fdb_def_rule, &table, error); if (ret) return ret; - dev_flow->group = table; + dev_flow->dv.group = table; if (attr->transfer) mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; if (priority == MLX5_FLOW_PRIO_RSVD) @@ -7221,7 +7392,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, (dev, &port_id_resource, dev_flow, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.port_id_action->action; + handle->dvh.port_id_action->action; action_flags |= MLX5_FLOW_ACTION_PORT_ID; break; case RTE_FLOW_ACTION_TYPE_FLAG: @@ -7239,12 +7410,17 @@ __flow_dv_translate(struct rte_eth_dev *dev, break; } tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); - if (!dev_flow->dv.tag_resource) - if (flow_dv_tag_resource_register - (dev, tag_be, dev_flow, error)) - return -rte_errno; + /* + * Only one FLAG or MARK is supported per device flow + * right now. So the pointer to the tag resource must be + * zero before the register process. 
+ */ + MLX5_ASSERT(!handle->dvh.tag_resource); + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) + return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.tag_resource->action; + handle->dvh.tag_resource->action; break; case RTE_FLOW_ACTION_TYPE_MARK: action_flags |= MLX5_FLOW_ACTION_MARK; @@ -7266,12 +7442,12 @@ __flow_dv_translate(struct rte_eth_dev *dev, tag_be = mlx5_flow_mark_set (((const struct rte_flow_action_mark *) (actions->conf))->id); - if (!dev_flow->dv.tag_resource) - if (flow_dv_tag_resource_register - (dev, tag_be, dev_flow, error)) - return -rte_errno; + MLX5_ASSERT(!handle->dvh.tag_resource); + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) + return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.tag_resource->action; + handle->dvh.tag_resource->action; break; case RTE_FLOW_ACTION_TYPE_SET_META: if (flow_dv_convert_action_set_meta @@ -7321,9 +7497,9 @@ __flow_dv_translate(struct rte_eth_dev *dev, goto cnt_err; } flow->counter = flow_dv_counter_alloc(dev, - count->shared, - count->id, - dev_flow->group); + count->shared, + count->id, + dev_flow->dv.group); if (flow->counter == NULL) goto cnt_err; dev_flow->dv.actions[actions_n++] = @@ -7369,7 +7545,7 @@ cnt_err: (dev, attr, &vlan, dev_flow, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.push_vlan_res->action; + handle->dvh.push_vlan_res->action; action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; break; case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: @@ -7396,7 +7572,7 @@ cnt_err: error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; action_flags |= MLX5_FLOW_ACTION_ENCAP; break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: @@ -7406,7 +7582,7 @@ cnt_err: error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; action_flags |= MLX5_FLOW_ACTION_DECAP; break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: @@ -7416,7 +7592,7 @@ cnt_err: (dev, actions, dev_flow, attr, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; } else { /* Handle encap without preceding decap. */ if (flow_dv_create_action_l2_encap @@ -7424,7 +7600,7 @@ cnt_err: error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; } action_flags |= MLX5_FLOW_ACTION_ENCAP; break; @@ -7436,7 +7612,7 @@ cnt_err: (dev, dev_flow, attr->transfer, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + handle->dvh.encap_decap->verbs_action; } /* If decap is followed by encap, handle it at encap. 
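A pattern worth noting in the translation hunks above and below: every refcounted object is now stored twice, with different lifetimes. The register helpers put it in the persistent handle (so destroy can drop the reference later), while the raw DR/verb action pointer is staged in the per-creation dv workspace, consumed once at apply time. Condensed from the FLAG/MARK case, with names taken from the patch:

	/* Persistent: handle->dvh.tag_resource survives until flow destroy. */
	if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
		return -rte_errno;
	/* Ephemeral: dv.actions[] lives only while the flow is created. */
	dev_flow->dv.actions[actions_n++] =
		dev_flow->handle->dvh.tag_resource->action;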
*/ action_flags |= MLX5_FLOW_ACTION_DECAP; @@ -7468,7 +7644,7 @@ cnt_err: "cannot create jump action."); } dev_flow->dv.actions[actions_n++] = - dev_flow->dv.jump->action; + handle->dvh.jump->action; action_flags |= MLX5_FLOW_ACTION_JUMP; break; case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: @@ -7505,7 +7681,7 @@ cnt_err: case RTE_FLOW_ACTION_TYPE_SET_TP_DST: if (flow_dv_convert_action_modify_tp (mhdr_res, actions, items, - &flow_attr, !!(action_flags & + &flow_attr, dev_flow, !!(action_flags & MLX5_FLOW_ACTION_DECAP), error)) return -rte_errno; action_flags |= actions->type == @@ -7515,7 +7691,7 @@ cnt_err: break; case RTE_FLOW_ACTION_TYPE_DEC_TTL: if (flow_dv_convert_action_modify_dec_ttl - (mhdr_res, items, &flow_attr, + (mhdr_res, items, &flow_attr, dev_flow, !!(action_flags & MLX5_FLOW_ACTION_DECAP), error)) return -rte_errno; @@ -7524,7 +7700,7 @@ cnt_err: case RTE_FLOW_ACTION_TYPE_SET_TTL: if (flow_dv_convert_action_modify_ttl (mhdr_res, actions, items, &flow_attr, - !!(action_flags & + dev_flow, !!(action_flags & MLX5_FLOW_ACTION_DECAP), error)) return -rte_errno; action_flags |= MLX5_FLOW_ACTION_SET_TTL; @@ -7601,7 +7777,7 @@ cnt_err: (dev, mhdr_res, dev_flow, error)) return -rte_errno; dev_flow->dv.actions[modify_action_position] = - dev_flow->dv.modify_hdr->verbs_action; + handle->dvh.modify_hdr->verbs_action; } break; default: @@ -7612,7 +7788,7 @@ cnt_err: modify_action_position = actions_n++; } dev_flow->dv.actions_n = actions_n; - dev_flow->actions = action_flags; + handle->act_flags = action_flags; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int item_type = items->type; @@ -7644,8 +7820,8 @@ cnt_err: mlx5_flow_tunnel_ip_check(items, next_protocol, &item_flags, &tunnel); flow_dv_translate_item_ipv4(match_mask, match_value, - items, tunnel, - dev_flow->group); + items, item_flags, tunnel, + dev_flow->dv.group); matcher.priority = MLX5_PRIORITY_MAP_L3; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : MLX5_FLOW_LAYER_OUTER_L3_IPV4; @@ -7667,8 +7843,8 @@ cnt_err: mlx5_flow_tunnel_ip_check(items, next_protocol, &item_flags, &tunnel); flow_dv_translate_item_ipv6(match_mask, match_value, - items, tunnel, - dev_flow->group); + items, item_flags, tunnel, + dev_flow->dv.group); matcher.priority = MLX5_PRIORITY_MAP_L3; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : MLX5_FLOW_LAYER_OUTER_L3_IPV6; @@ -7813,7 +7989,11 @@ cnt_err: MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, dev_flow->dv.value.buf)); #endif - dev_flow->layers = item_flags; + /* + * Layers may be already initialized from prefix flow if this dev_flow + * is the suffix flow. + */ + handle->layers |= item_flags; if (action_flags & MLX5_FLOW_ACTION_RSS) flow_dv_hashfields_set(dev_flow); /* Register matcher. */ @@ -7824,7 +8004,7 @@ cnt_err: /* reserved field no needs to be set to 0 here. 
*/ tbl_key.domain = attr->transfer; tbl_key.direction = attr->egress; - tbl_key.table_id = dev_flow->group; + tbl_key.table_id = dev_flow->dv.group; if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error)) return -rte_errno; return 0; @@ -7848,21 +8028,27 @@ static int __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error) { - struct mlx5_flow_dv *dv; + struct mlx5_flow_dv_workspace *dv; + struct mlx5_flow_handle *dh; + struct mlx5_flow_handle_dv *dv_h; struct mlx5_flow *dev_flow; struct mlx5_priv *priv = dev->data->dev_private; int n; int err; + int idx; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { + for (idx = priv->flow_idx - 1; idx >= 0; idx--) { + dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; dv = &dev_flow->dv; + dh = dev_flow->handle; + dv_h = &dh->dvh; n = dv->actions_n; - if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) { - if (dev_flow->transfer) { + if (dh->act_flags & MLX5_FLOW_ACTION_DROP) { + if (dv->transfer) { dv->actions[n++] = priv->sh->esw_drop_action; } else { - dv->hrxq = mlx5_hrxq_drop_new(dev); - if (!dv->hrxq) { + dh->hrxq = mlx5_hrxq_drop_new(dev); + if (!dh->hrxq) { rte_flow_error_set (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -7870,9 +8056,9 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "cannot get drop hash queue"); goto error; } - dv->actions[n++] = dv->hrxq->action; + dv->actions[n++] = dh->hrxq->action; } - } else if (dev_flow->actions & + } else if (dh->act_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) { struct mlx5_hrxq *hrxq; @@ -7889,7 +8075,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, dev_flow->hash_fields, (*flow->rss.queue), flow->rss.queue_num, - !!(dev_flow->layers & + !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL)); } if (!hrxq) { @@ -7899,14 +8085,14 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "cannot get hash queue"); goto error; } - dv->hrxq = hrxq; - dv->actions[n++] = dv->hrxq->action; + dh->hrxq = hrxq; + dv->actions[n++] = dh->hrxq->action; } - dv->flow = - mlx5_glue->dv_create_flow(dv->matcher->matcher_object, + dh->ib_flow = + mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object, (void *)&dv->value, n, dv->actions); - if (!dv->flow) { + if (!dh->ib_flow) { rte_flow_error_set(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -7914,32 +8100,29 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, goto error; } if (priv->vmwa_context && - dev_flow->dv.vf_vlan.tag && - !dev_flow->dv.vf_vlan.created) { + dh->vf_vlan.tag && !dh->vf_vlan.created) { /* * The rule contains the VLAN pattern. * For VF we are going to create VLAN * interface to make hypervisor set correct * e-Switch vport context. */ - mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan); + mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan); } } return 0; error: err = rte_errno; /* Save rte_errno before cleanup. 
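__flow_dv_apply() above iterates the creation-time scratch array, but its error path (and __flow_dv_remove() below) walks flow->dev_handles instead: after creation only the handles remain valid, and they now own the hrxq and ib_flow pointers. A sketch of the two traversals, using the names from the patch:

	/* Creation: scratch mlx5_flow entries, valid only inside flow create. */
	for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
		/* ... stage dv.actions[], then create dh->ib_flow ... */
	}
	/* Cleanup: persistent handles linked into the rte_flow. */
	LIST_FOREACH(dh, &flow->dev_handles, next) {
		if (dh->hrxq)
			mlx5_hrxq_release(dev, dh->hrxq); /* or drop queue */
	}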
*/ - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - struct mlx5_flow_dv *dv = &dev_flow->dv; - if (dv->hrxq) { - if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) + LIST_FOREACH(dh, &flow->dev_handles, next) { + if (dh->hrxq) { + if (dh->act_flags & MLX5_FLOW_ACTION_DROP) mlx5_hrxq_drop_release(dev); else - mlx5_hrxq_release(dev, dv->hrxq); - dv->hrxq = NULL; + mlx5_hrxq_release(dev, dh->hrxq); + dh->hrxq = NULL; } - if (dev_flow->dv.vf_vlan.tag && - dev_flow->dv.vf_vlan.created) - mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan); + if (dh->vf_vlan.tag && dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); } rte_errno = err; /* Restore rte_errno. */ return -rte_errno; @@ -7950,17 +8133,17 @@ error: * * @param dev * Pointer to Ethernet device. - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int flow_dv_matcher_release(struct rte_eth_dev *dev, - struct mlx5_flow *flow) + struct mlx5_flow_handle *handle) { - struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher; + struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher; MLX5_ASSERT(matcher->matcher_object); DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", @@ -7983,17 +8166,17 @@ flow_dv_matcher_release(struct rte_eth_dev *dev, /** * Release an encap/decap resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int -flow_dv_encap_decap_resource_release(struct mlx5_flow *flow) +flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle) { struct mlx5_flow_dv_encap_decap_resource *cache_resource = - flow->dv.encap_decap; + handle->dvh.encap_decap; MLX5_ASSERT(cache_resource->verbs_action); DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", @@ -8016,17 +8199,18 @@ flow_dv_encap_decap_resource_release(struct mlx5_flow *flow) * * @param dev * Pointer to Ethernet device. - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, - struct mlx5_flow *flow) + struct mlx5_flow_handle *handle) { - struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump; + struct mlx5_flow_dv_jump_tbl_resource *cache_resource = + handle->dvh.jump; struct mlx5_flow_tbl_data_entry *tbl_data = container_of(cache_resource, struct mlx5_flow_tbl_data_entry, jump); @@ -8050,17 +8234,17 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, /** * Release a modify-header resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int -flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow) +flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle) { struct mlx5_flow_dv_modify_hdr_resource *cache_resource = - flow->dv.modify_hdr; + handle->dvh.modify_hdr; MLX5_ASSERT(cache_resource->verbs_action); DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--", @@ -8081,17 +8265,17 @@ flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow) /** * Release port ID action resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. 
*/ static int -flow_dv_port_id_action_resource_release(struct mlx5_flow *flow) +flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle) { struct mlx5_flow_dv_port_id_action_resource *cache_resource = - flow->dv.port_id_action; + handle->dvh.port_id_action; MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--", @@ -8112,17 +8296,17 @@ flow_dv_port_id_action_resource_release(struct mlx5_flow *flow) /** * Release push vlan action resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int -flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow) +flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle) { struct mlx5_flow_dv_push_vlan_action_resource *cache_resource = - flow->dv.push_vlan_res; + handle->dvh.push_vlan_res; MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--", @@ -8152,27 +8336,24 @@ flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow) static void __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_dv *dv; - struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dh; if (!flow) return; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - dv = &dev_flow->dv; - if (dv->flow) { - claim_zero(mlx5_glue->dv_destroy_flow(dv->flow)); - dv->flow = NULL; + LIST_FOREACH(dh, &flow->dev_handles, next) { + if (dh->ib_flow) { + claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow)); + dh->ib_flow = NULL; } - if (dv->hrxq) { - if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) + if (dh->hrxq) { + if (dh->act_flags & MLX5_FLOW_ACTION_DROP) mlx5_hrxq_drop_release(dev); else - mlx5_hrxq_release(dev, dv->hrxq); - dv->hrxq = NULL; + mlx5_hrxq_release(dev, dh->hrxq); + dh->hrxq = NULL; } - if (dev_flow->dv.vf_vlan.tag && - dev_flow->dv.vf_vlan.created) - mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan); + if (dh->vf_vlan.tag && dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); } } @@ -8188,7 +8369,7 @@ __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) static void __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dev_handle; if (!flow) return; @@ -8201,24 +8382,25 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) mlx5_flow_meter_detach(flow->meter); flow->meter = NULL; } - while (!LIST_EMPTY(&flow->dev_flows)) { - dev_flow = LIST_FIRST(&flow->dev_flows); - LIST_REMOVE(dev_flow, next); - if (dev_flow->dv.matcher) - flow_dv_matcher_release(dev, dev_flow); - if (dev_flow->dv.encap_decap) - flow_dv_encap_decap_resource_release(dev_flow); - if (dev_flow->dv.modify_hdr) - flow_dv_modify_hdr_resource_release(dev_flow); - if (dev_flow->dv.jump) - flow_dv_jump_tbl_resource_release(dev, dev_flow); - if (dev_flow->dv.port_id_action) - flow_dv_port_id_action_resource_release(dev_flow); - if (dev_flow->dv.push_vlan_res) - flow_dv_push_vlan_action_resource_release(dev_flow); - if (dev_flow->dv.tag_resource) - flow_dv_tag_release(dev, dev_flow->dv.tag_resource); - rte_free(dev_flow); + while (!LIST_EMPTY(&flow->dev_handles)) { + dev_handle = LIST_FIRST(&flow->dev_handles); + LIST_REMOVE(dev_handle, next); + if (dev_handle->dvh.matcher) + flow_dv_matcher_release(dev, dev_handle); + if (dev_handle->dvh.encap_decap) + flow_dv_encap_decap_resource_release(dev_handle); + if (dev_handle->dvh.modify_hdr) + 
flow_dv_modify_hdr_resource_release(dev_handle); + if (dev_handle->dvh.jump) + flow_dv_jump_tbl_resource_release(dev, dev_handle); + if (dev_handle->dvh.port_id_action) + flow_dv_port_id_action_resource_release(dev_handle); + if (dev_handle->dvh.push_vlan_res) + flow_dv_push_vlan_action_resource_release(dev_handle); + if (dev_handle->dvh.tag_resource) + flow_dv_tag_release(dev, + dev_handle->dvh.tag_resource); + rte_free(dev_handle); } }
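Taken together, the patch splits each device flow into an ephemeral translation workspace (struct mlx5_flow, recycled per creation) and a persistent struct mlx5_flow_handle that alone is kept on flow->dev_handles. A condensed view of the resulting lifecycle, with function names from the patch and error handling elided:

	/* create: scratch dev_flow + heap-allocated handle per sub-flow */
	dev_flow = flow_dv_prepare(dev, attr, items, actions, error);
	__flow_dv_translate(dev, dev_flow, attr, items, actions, error);
	__flow_dv_apply(dev, flow, error);	/* programs dh->ib_flow */
	/* teardown: only the handles are consulted */
	__flow_dv_remove(dev, flow);	/* destroys ib_flow, releases hrxq */
	__flow_dv_destroy(dev, flow);	/* drops refcounted DV resources */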