X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=51c95f19e81aa7e6c8a57461ee970f3c278ba57f;hb=fd44e8288fbbfd58e4b8a1ba454526fffd85a436;hp=b44e9d386e95b5ba78004148226b7f63186961a1;hpb=07627fbf15063172a9cad325082066b60d3ed96c;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index b44e9d386e..51c95f19e8 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -431,6 +431,7 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
 			/* Convert entire record to big-endian format. */
 			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
+			++dcopy;
 		} else {
 			MLX5_ASSERT(item->spec);
 			data = flow_dv_fetch_field((const uint8_t *)item->spec +
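The one-line `++dcopy` fix above is what keeps multi-field COPY conversions coherent: `flow_dv_convert_modify_action()` walks a source descriptor array (`field`) and a destination array (`dcopy`) that must advance in lockstep, one entry per emitted modification record. A minimal conceptual sketch, with a simplified type standing in for the driver's `struct field_modify_info` (size/offset/id):

    struct fmi {	/* simplified stand-in for struct field_modify_info */
    	unsigned int size;	/* field size in bytes */
    	unsigned int offset;	/* offset into the item spec */
    	unsigned int id;	/* MLX5_MODI_* register id */
    };

    static void
    emit_copy_records(const struct fmi *field, const struct fmi *dcopy)
    {
    	/* One record per descriptor pair; without the ++dcopy above,
    	 * every record would have targeted the first destination field. */
    	for (; field->size; ++field, ++dcopy)
    		; /* emit copy: register field->id -> register dcopy->id */
    }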
@@ -1324,6 +1325,476 @@ flow_dv_convert_action_modify_ipv6_dscp
 					     MLX5_MODIFICATION_TYPE_SET, error);
 }
 
+static void
+mlx5_flow_field_id_to_modify_info
+		(const struct rte_flow_action_modify_data *data,
+		 struct field_modify_info *info,
+		 uint32_t *mask, uint32_t *value, uint32_t width,
+		 struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 struct rte_flow_error *error)
+{
+	uint32_t idx = 0;
+	switch (data->field) {
+	case RTE_FLOW_FIELD_START:
+		/* not supported yet */
+		MLX5_ASSERT(false);
+		break;
+	case RTE_FLOW_FIELD_MAC_DST:
+		if (mask) {
+			if (data->offset < 32) {
+				info[idx] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_DMAC_47_16};
+				if (width < 32) {
+					mask[idx] =
+						rte_cpu_to_be_32(0xffffffff >>
+								 (32 - width));
+					width = 0;
+				} else {
+					mask[idx] = RTE_BE32(0xffffffff);
+					width -= 32;
+				}
+				if (!width)
+					break;
+				++idx;
+			}
+			info[idx] = (struct field_modify_info){2, 4 * idx,
+						MLX5_MODI_OUT_DMAC_15_0};
+			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+						     (16 - width));
+		} else {
+			if (data->offset < 32)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_DMAC_47_16};
+			info[idx] = (struct field_modify_info){2, 0,
+						MLX5_MODI_OUT_DMAC_15_0};
+		}
+		break;
+	case RTE_FLOW_FIELD_MAC_SRC:
+		if (mask) {
+			if (data->offset < 32) {
+				info[idx] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_SMAC_47_16};
+				if (width < 32) {
+					mask[idx] =
+						rte_cpu_to_be_32(0xffffffff >>
+								 (32 - width));
+					width = 0;
+				} else {
+					mask[idx] = RTE_BE32(0xffffffff);
+					width -= 32;
+				}
+				if (!width)
+					break;
+				++idx;
+			}
+			info[idx] = (struct field_modify_info){2, 4 * idx,
+						MLX5_MODI_OUT_SMAC_15_0};
+			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+						     (16 - width));
+		} else {
+			if (data->offset < 32)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_SMAC_47_16};
+			info[idx] = (struct field_modify_info){2, 0,
+						MLX5_MODI_OUT_SMAC_15_0};
+		}
+		break;
+	case RTE_FLOW_FIELD_VLAN_TYPE:
+		/* not supported yet */
+		break;
+	case RTE_FLOW_FIELD_VLAN_ID:
+		info[idx] = (struct field_modify_info){2, 0,
+					MLX5_MODI_OUT_FIRST_VID};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x00000fff >>
+						     (12 - width));
+		break;
+	case RTE_FLOW_FIELD_MAC_TYPE:
+		info[idx] = (struct field_modify_info){2, 0,
+					MLX5_MODI_OUT_ETHERTYPE};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+						     (16 - width));
+		break;
+	case RTE_FLOW_FIELD_IPV4_DSCP:
+		info[idx] = (struct field_modify_info){1, 0,
+					MLX5_MODI_OUT_IP_DSCP};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x0000003f >>
+						     (6 - width));
+		break;
+	case RTE_FLOW_FIELD_IPV4_TTL:
+		info[idx] = (struct field_modify_info){1, 0,
+					MLX5_MODI_OUT_IPV4_TTL};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x000000ff >>
+						     (8 - width));
+		break;
+	case RTE_FLOW_FIELD_IPV4_SRC:
+		info[idx] = (struct field_modify_info){4, 0,
+					MLX5_MODI_OUT_SIPV4};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+						     (32 - width));
+		break;
+	case RTE_FLOW_FIELD_IPV4_DST:
+		info[idx] = (struct field_modify_info){4, 0,
+					MLX5_MODI_OUT_DIPV4};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+						     (32 - width));
+		break;
+	case RTE_FLOW_FIELD_IPV6_DSCP:
+		info[idx] = (struct field_modify_info){1, 0,
+					MLX5_MODI_OUT_IP_DSCP};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x0000003f >>
+						     (6 - width));
+		break;
+	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
+		info[idx] = (struct field_modify_info){1, 0,
+					MLX5_MODI_OUT_IPV6_HOPLIMIT};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x000000ff >>
+						     (8 - width));
+		break;
+	case RTE_FLOW_FIELD_IPV6_SRC:
+		if (mask) {
+			if (data->offset < 32) {
+				info[idx] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_SIPV6_127_96};
+				if (width < 32) {
+					mask[idx] =
+						rte_cpu_to_be_32(0xffffffff >>
+								 (32 - width));
+					width = 0;
+				} else {
+					mask[idx] = RTE_BE32(0xffffffff);
+					width -= 32;
+				}
+				if (!width)
+					break;
+				++idx;
+			}
+			if (data->offset < 64) {
+				info[idx] = (struct field_modify_info){4,
+						4 * idx,
+						MLX5_MODI_OUT_SIPV6_95_64};
+				if (width < 32) {
+					mask[idx] =
+						rte_cpu_to_be_32(0xffffffff >>
+								 (32 - width));
+					width = 0;
+				} else {
+					mask[idx] = RTE_BE32(0xffffffff);
+					width -= 32;
+				}
+				if (!width)
+					break;
+				++idx;
+			}
+			if (data->offset < 96) {
+				info[idx] = (struct field_modify_info){4,
+						8 * idx,
+						MLX5_MODI_OUT_SIPV6_63_32};
+				if (width < 32) {
+					mask[idx] =
+						rte_cpu_to_be_32(0xffffffff >>
+								 (32 - width));
+					width = 0;
+				} else {
+					mask[idx] = RTE_BE32(0xffffffff);
+					width -= 32;
+				}
+				if (!width)
+					break;
+				++idx;
+			}
+			info[idx] = (struct field_modify_info){4, 12 * idx,
+						MLX5_MODI_OUT_SIPV6_31_0};
+			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+						     (32 - width));
+		} else {
+			if (data->offset < 32)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_SIPV6_127_96};
+			if (data->offset < 64)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_SIPV6_95_64};
+			if (data->offset < 96)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_SIPV6_63_32};
+			if (data->offset < 128)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_SIPV6_31_0};
+		}
+		break;
+	case RTE_FLOW_FIELD_IPV6_DST:
+		if (mask) {
+			if (data->offset < 32) {
+				info[idx] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_DIPV6_127_96};
+				if (width < 32) {
+					mask[idx] =
+						rte_cpu_to_be_32(0xffffffff >>
+								 (32 - width));
+					width = 0;
+				} else {
+					mask[idx] = RTE_BE32(0xffffffff);
+					width -= 32;
+				}
+				if (!width)
+					break;
+				++idx;
+			}
+			if (data->offset < 64) {
+				info[idx] = (struct field_modify_info){4,
+						4 * idx,
+						MLX5_MODI_OUT_DIPV6_95_64};
+				if (width < 32) {
+					mask[idx] =
+						rte_cpu_to_be_32(0xffffffff >>
+								 (32 - width));
+					width = 0;
+				} else {
+					mask[idx] = RTE_BE32(0xffffffff);
+					width -= 32;
+				}
+				if (!width)
+					break;
+				++idx;
+			}
+			if (data->offset < 96) {
+				info[idx] = (struct field_modify_info){4,
+						8 * idx,
+						MLX5_MODI_OUT_DIPV6_63_32};
+				if (width < 32) {
+					mask[idx] =
+						rte_cpu_to_be_32(0xffffffff >>
+								 (32 - width));
+					width = 0;
+				} else {
+					mask[idx] = RTE_BE32(0xffffffff);
+					width -= 32;
+				}
+				if (!width)
+					break;
+				++idx;
+			}
+			info[idx] = (struct field_modify_info){4, 12 * idx,
+						MLX5_MODI_OUT_DIPV6_31_0};
+			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+						     (32 - width));
+		} else {
+			if (data->offset < 32)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_DIPV6_127_96};
+			if (data->offset < 64)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_DIPV6_95_64};
+			if (data->offset < 96)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_DIPV6_63_32};
+			if (data->offset < 128)
+				info[idx++] = (struct field_modify_info){4, 0,
+						MLX5_MODI_OUT_DIPV6_31_0};
+		}
+		break;
+	case RTE_FLOW_FIELD_TCP_PORT_SRC:
+		info[idx] = (struct field_modify_info){2, 0,
+					MLX5_MODI_OUT_TCP_SPORT};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+						     (16 - width));
+		break;
+	case RTE_FLOW_FIELD_TCP_PORT_DST:
+		info[idx] = (struct field_modify_info){2, 0,
+					MLX5_MODI_OUT_TCP_DPORT};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+						     (16 - width));
+		break;
+	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
+		info[idx] = (struct field_modify_info){4, 0,
+					MLX5_MODI_OUT_TCP_SEQ_NUM};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+						     (32 - width));
+		break;
+	case RTE_FLOW_FIELD_TCP_ACK_NUM:
+		info[idx] = (struct field_modify_info){4, 0,
+					MLX5_MODI_OUT_TCP_ACK_NUM};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+						     (32 - width));
+		break;
+	case RTE_FLOW_FIELD_TCP_FLAGS:
+		info[idx] = (struct field_modify_info){1, 0,
+					MLX5_MODI_OUT_TCP_FLAGS};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x0000003f >>
+						     (6 - width));
+		break;
+	case RTE_FLOW_FIELD_UDP_PORT_SRC:
+		info[idx] = (struct field_modify_info){2, 0,
+					MLX5_MODI_OUT_UDP_SPORT};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+						     (16 - width));
+		break;
+	case RTE_FLOW_FIELD_UDP_PORT_DST:
+		info[idx] = (struct field_modify_info){2, 0,
+					MLX5_MODI_OUT_UDP_DPORT};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+						     (16 - width));
+		break;
+	case RTE_FLOW_FIELD_VXLAN_VNI:
+		/* not supported yet */
+		break;
+	case RTE_FLOW_FIELD_GENEVE_VNI:
+		/* not supported yet */
+		break;
+	case RTE_FLOW_FIELD_GTP_TEID:
+		info[idx] = (struct field_modify_info){4, 0,
+					MLX5_MODI_GTP_TEID};
+		if (mask)
+			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+						     (32 - width));
+		break;
+	case RTE_FLOW_FIELD_TAG:
+		{
+			int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
+						       data->level, error);
+			if (reg < 0)
+				return;
+			MLX5_ASSERT(reg != REG_NON);
+			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
+			info[idx] = (struct field_modify_info){4, 0,
+						reg_to_field[reg]};
+			if (mask)
+				mask[idx] =
+					rte_cpu_to_be_32(0xffffffff >>
+							 (32 - width));
+		}
+		break;
+	case RTE_FLOW_FIELD_MARK:
+		{
+			int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
+						       0, error);
+			if (reg < 0)
+				return;
+			MLX5_ASSERT(reg != REG_NON);
+			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
+			info[idx] = (struct field_modify_info){4, 0,
+						reg_to_field[reg]};
+			if (mask)
+				mask[idx] =
+					rte_cpu_to_be_32(0xffffffff >>
+							 (32 - width));
+		}
+		break;
+	case RTE_FLOW_FIELD_META:
+		{
+			int reg = flow_dv_get_metadata_reg(dev, attr, error);
+			if (reg < 0)
+				return;
+			MLX5_ASSERT(reg != REG_NON);
+			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
+			info[idx] = (struct field_modify_info){4, 0,
+						reg_to_field[reg]};
+			if (mask)
+				mask[idx] =
+					rte_cpu_to_be_32(0xffffffff >>
+							 (32 - width));
+		}
+		break;
+	case RTE_FLOW_FIELD_POINTER:
+		for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
+			if (mask[idx]) {
+				memcpy(&value[idx],
+				       (void *)(uintptr_t)data->value, 32);
+				value[idx] = rte_cpu_to_be_32(value[idx]);
+				break;
+			}
+		}
+		break;
+	case RTE_FLOW_FIELD_VALUE:
+		for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
+			if (mask[idx]) {
+				value[idx] =
+					rte_cpu_to_be_32((uint32_t)data->value);
+				break;
+			}
+		}
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+}
+
+/**
+ * Convert modify_field action to DV specification.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in,out] resource
+ *   Pointer to the modify-header resource.
+ * @param[in] action
+ *   Pointer to action specification.
+ * @param[in] attr
+ *   Attributes of flow that includes this item.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_field
+			(struct rte_eth_dev *dev,
+			 struct mlx5_flow_dv_modify_hdr_resource *resource,
+			 const struct rte_flow_action *action,
+			 const struct rte_flow_attr *attr,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_action_modify_field *conf =
+		(const struct rte_flow_action_modify_field *)(action->conf);
+	struct rte_flow_item item;
+	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
+								{0, 0, 0} };
+	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
+								{0, 0, 0} };
+	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
+	uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
+	uint32_t type;
+
+	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
+	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
+		type = MLX5_MODIFICATION_TYPE_SET;
+		/** For SET fill the destination field (field) first. */
+		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
+					value, conf->width, dev, attr, error);
+		/** Then copy immediate value from source as per mask. */
+		mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
+					value, conf->width, dev, attr, error);
+		item.spec = &value;
+	} else {
+		type = MLX5_MODIFICATION_TYPE_COPY;
+		/** For COPY fill the destination field (dcopy) without mask. */
+		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
+					value, conf->width, dev, attr, error);
+		/** Then construct the source field (field) with mask. */
+		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
+					value, conf->width, dev, attr, error);
+	}
+	item.mask = &mask;
+	return flow_dv_convert_modify_action(&item,
+			field, dcopy, resource, type, error);
+}
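For context, a minimal application-side sketch (not part of this patch) of the generic MODIFY_FIELD action that the new conversion routine above translates; structure and field names follow the rte_flow API this patch targets. It copies the 32-bit GTP TEID into metadata register TAG[0], which the driver turns into a MLX5_MODIFICATION_TYPE_COPY record:

    #include <rte_flow.h>

    static const struct rte_flow_action_modify_field copy_teid_to_tag = {
    	.operation = RTE_FLOW_MODIFY_SET,	/* the only op accepted below */
    	.dst = {
    		.field = RTE_FLOW_FIELD_TAG,
    		.level = 0,	/* TAG array index */
    		.offset = 0,
    	},
    	.src = {
    		.field = RTE_FLOW_FIELD_GTP_TEID,
    		.level = 0,
    		.offset = 0,
    	},
    	.width = 32,	/* whole TEID */
    };

    static const struct rte_flow_action actions[] = {
    	{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
    	  .conf = &copy_teid_to_tag },
    	{ .type = RTE_FLOW_ACTION_TYPE_END },
    };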
+
 /**
  * Validate MARK item.
  *
@@ -1454,13 +1925,20 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
 					"isn't supported");
 		if (reg != REG_A)
 			nic_mask.data = priv->sh->dv_meta_mask;
-	} else if (attr->transfer) {
-		return rte_flow_error_set(error, ENOTSUP,
+	} else {
+		if (attr->transfer)
+			return rte_flow_error_set(error, ENOTSUP,
 					RTE_FLOW_ERROR_TYPE_ITEM, item,
 					"extended metadata feature "
 					"should be enabled when "
 					"meta item is requested "
 					"with e-switch mode ");
+		if (attr->ingress)
+			return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"match on metadata for ingress "
+					"is not supported in legacy "
+					"metadata mode");
 	}
 	if (!mask)
 		mask = &rte_flow_item_meta_mask;
@@ -2445,6 +2923,11 @@ flow_dv_validate_action_mark(struct rte_eth_dev *dev,
 	const struct rte_flow_action_mark *mark = action->conf;
 	int ret;
 
+	if (is_tunnel_offload_active(dev))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "no mark action "
+					  "if tunnel offload active");
 	/* Fall back if no extended metadata register support. */
 	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
 		return mlx5_flow_validate_action_mark(action, action_flags,
@@ -2607,6 +3090,10 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
  *
  * @param[in] dev
  *   Pointer to rte_eth_dev structure.
+ * @param[in] action
+ *   Pointer to the action structure.
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -2615,12 +3102,25 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
  */
 static int
 flow_dv_validate_action_count(struct rte_eth_dev *dev,
+			      const struct rte_flow_action *action,
+			      uint64_t action_flags,
 			      struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_action_count *count;
 
 	if (!priv->config.devx)
 		goto notsup_err;
+	if (action_flags & MLX5_FLOW_ACTION_COUNT)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "duplicate count actions set");
+	count = (const struct rte_flow_action_count *)action->conf;
+	if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
+	    !priv->sh->flow_hit_aso_en)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "old age and shared count combination is not supported");
 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
 	return 0;
 #endif
@@ -3077,6 +3577,7 @@ flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
 				   "cannot create action");
 		return NULL;
 	}
+	cache->idx = idx;
 	return &cache->entry;
 }
 
@@ -3168,6 +3669,7 @@ flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
 				   "cannot create push vlan action");
 		return NULL;
 	}
+	cache->idx = idx;
 	return &cache->entry;
 }
 
@@ -3980,6 +4482,214 @@ flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
 	return ret;
 }
 
+static int
+mlx5_flow_item_field_width(enum rte_flow_field_id field)
+{
+	switch (field) {
+	case RTE_FLOW_FIELD_START:
+		return 32;
+	case RTE_FLOW_FIELD_MAC_DST:
+	case RTE_FLOW_FIELD_MAC_SRC:
+		return 48;
+	case RTE_FLOW_FIELD_VLAN_TYPE:
+		return 16;
+	case RTE_FLOW_FIELD_VLAN_ID:
+		return 12;
+	case RTE_FLOW_FIELD_MAC_TYPE:
+		return 16;
+	case RTE_FLOW_FIELD_IPV4_DSCP:
+		return 6;
+	case RTE_FLOW_FIELD_IPV4_TTL:
+		return 8;
+	case RTE_FLOW_FIELD_IPV4_SRC:
+	case RTE_FLOW_FIELD_IPV4_DST:
+		return 32;
+	case RTE_FLOW_FIELD_IPV6_DSCP:
+		return 6;
+	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
+		return 8;
+	case RTE_FLOW_FIELD_IPV6_SRC:
+	case RTE_FLOW_FIELD_IPV6_DST:
+		return 128;
+	case RTE_FLOW_FIELD_TCP_PORT_SRC:
+	case RTE_FLOW_FIELD_TCP_PORT_DST:
+		return 16;
+	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
+	case RTE_FLOW_FIELD_TCP_ACK_NUM:
+		return 32;
+	case RTE_FLOW_FIELD_TCP_FLAGS:
+		return 6;
+	case RTE_FLOW_FIELD_UDP_PORT_SRC:
+	case RTE_FLOW_FIELD_UDP_PORT_DST:
+		return 16;
+	case RTE_FLOW_FIELD_VXLAN_VNI:
+	case RTE_FLOW_FIELD_GENEVE_VNI:
+		return 24;
+	case RTE_FLOW_FIELD_GTP_TEID:
+	case RTE_FLOW_FIELD_TAG:
+		return 32;
+	case RTE_FLOW_FIELD_MARK:
+		return 24;
+	case RTE_FLOW_FIELD_META:
+	case RTE_FLOW_FIELD_POINTER:
+	case RTE_FLOW_FIELD_VALUE:
+		return 32;
+	default:
+		MLX5_ASSERT(false);
+	}
+	return 0;
+}
+
+/**
+ * Validate the generic modify field actions.
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
+ * @param[in] action
+ *   Pointer to the modify action.
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Number of header fields to modify (0 or more) on success,
+ *   a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
+				     const uint64_t action_flags,
+				     const struct rte_flow_action *action,
+				     const struct rte_flow_attr *attr,
+				     struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
+	const struct rte_flow_action_modify_field *action_modify_field =
+		action->conf;
+	uint32_t dst_width =
+		mlx5_flow_item_field_width(action_modify_field->dst.field);
+	uint32_t src_width =
+		mlx5_flow_item_field_width(action_modify_field->src.field);
+
+	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+	if (ret)
+		return ret;
+
+	if (action_modify_field->width == 0)
+		return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"no bits are requested to be modified");
+	else if (action_modify_field->width > dst_width ||
+		 action_modify_field->width > src_width)
+		return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"cannot modify more bits than"
+					" the width of a field");
+	if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
+	    action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
+		if ((action_modify_field->dst.offset +
+		     action_modify_field->width > dst_width) ||
+		    (action_modify_field->dst.offset % 32))
+			return rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"destination offset is too big"
+						" or not aligned to 4 bytes");
+		if (action_modify_field->dst.level &&
+		    action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
+			return rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"cannot modify inner headers");
+	}
+	if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
+	    action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
+		if (!attr->transfer && !attr->group)
+			return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL, "modify field action "
+					"is not supported for group 0");
+		if ((action_modify_field->src.offset +
+		     action_modify_field->width > src_width) ||
+		    (action_modify_field->src.offset % 32))
+			return rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"source offset is too big"
+						" or not aligned to 4 bytes");
+		if (action_modify_field->src.level &&
+		    action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
+			return rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"cannot copy from inner headers");
+	}
+	if (action_modify_field->dst.field ==
+	    action_modify_field->src.field)
+		return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"source and destination fields"
+					" cannot be the same");
+	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
+	    action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
+		return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"immediate value or a pointer to it"
+					" cannot be used as a destination");
+	if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
+	    action_modify_field->src.field == RTE_FLOW_FIELD_START)
+		return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"modifications of an arbitrary"
+					" place in a packet is not supported");
+	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
+	    action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
+		return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"modifications of the 802.1Q Tag"
+					" Identifier is not supported");
+	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
+	    action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
+		return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"modifications of the VXLAN Network"
+					" Identifier is not supported");
+	if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
+	    action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
+		return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"modifications of the GENEVE Network"
+					" Identifier is not supported");
+	if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
+	    action_modify_field->src.field == RTE_FLOW_FIELD_MARK) {
+		if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+		    !mlx5_flow_ext_mreg_supported(dev))
+			return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"cannot modify mark without extended"
+					" metadata register support");
+	}
+	if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
+		return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"add and sub operations"
+					" are not supported");
+	return (action_modify_field->width / 32) +
+	       !!(action_modify_field->width % 32);
+}
+
 /**
  * Validate jump action.
  *
@@ -4390,7 +5100,7 @@ flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
 /**
  * Validate the sample action.
  *
- * @param[in] action_flags
+ * @param[in, out] action_flags
  *   Holds the actions detected until now.
  * @param[in] action
  *   Pointer to the sample action.
@@ -4404,6 +5114,10 @@ flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
  *   Pointer to the RSS action.
  * @param[out] sample_rss
  *   Pointer to the RSS action in sample action list.
+ * @param[out] count
+ *   Pointer to the COUNT action in sample action list.
+ * @param[out] fdb_mirror_limit
+ *   Pointer to the FDB mirror limitation flag.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -4411,13 +5125,15 @@ flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_dv_validate_action_sample(uint64_t action_flags,
+flow_dv_validate_action_sample(uint64_t *action_flags,
 			       const struct rte_flow_action *action,
 			       struct rte_eth_dev *dev,
 			       const struct rte_flow_attr *attr,
 			       uint64_t item_flags,
 			       const struct rte_flow_action_rss *rss,
 			       const struct rte_flow_action_rss **sample_rss,
+			       const struct rte_flow_action_count **count,
+			       int *fdb_mirror_limit,
 			       struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -4442,17 +5158,17 @@ flow_dv_validate_action_sample(uint64_t action_flags,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
 					  "sample action not supported");
-	if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+	if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "Multiple sample actions not "
 					  "supported");
-	if (action_flags & MLX5_FLOW_ACTION_METER)
+	if (*action_flags & MLX5_FLOW_ACTION_METER)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
 					  "wrong action order, meter should "
 					  "be after sample action");
-	if (action_flags & MLX5_FLOW_ACTION_JUMP)
+	if (*action_flags & MLX5_FLOW_ACTION_JUMP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
 					  "wrong action order, jump should "
@@ -4512,10 +5228,15 @@ flow_dv_validate_action_sample(uint64_t action_flags,
 			++actions_n;
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
-			ret = flow_dv_validate_action_count(dev, error);
+			ret = flow_dv_validate_action_count
+				(dev, act,
+				 *action_flags | sub_action_flags,
+				 error);
 			if (ret < 0)
 				return ret;
+			*count = act->conf;
 			sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
+			*action_flags |= MLX5_FLOW_ACTION_COUNT;
 			++actions_n;
 			break;
 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
@@ -4537,6 +5258,17 @@ flow_dv_validate_action_sample(uint64_t action_flags,
 				return ret;
 			++actions_n;
 			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+			ret = flow_dv_validate_action_l2_encap(dev,
+							       sub_action_flags,
+							       act, attr,
+							       error);
+			if (ret < 0)
+				return ret;
+			sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
+			++actions_n;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -4584,6 +5316,9 @@ flow_dv_validate_action_sample(uint64_t action_flags,
 						  NULL,
 						  "E-Switch must has a dest "
 						  "port for mirroring");
+		if (!priv->config.hca_attr.reg_c_preserve &&
+		    priv->representor_id != -1)
+			*fdb_mirror_limit = 1;
 	}
 	/* Continue validation for Xcap actions.*/
 	if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
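A usage sketch (illustrative only; the `vxlan_encap_items` contents are elided placeholders) of the sample-action shape this extended validator now accepts: a ratio-1 E-Switch mirror whose sub-action list may carry an L2 encapsulation ahead of the PORT_ID fate:

    #include <rte_flow.h>

    static struct rte_flow_item vxlan_encap_items[] = {
    	/* ETH / IPV4 / UDP / VXLAN items describing the new outer
    	 * headers would go here; elided in this sketch. */
    	{ .type = RTE_FLOW_ITEM_TYPE_END },
    };

    static const struct rte_flow_action_vxlan_encap vxlan_conf = {
    	.definition = vxlan_encap_items,
    };

    static const struct rte_flow_action_port_id mirror_port = { .id = 1 };

    static const struct rte_flow_action sample_sub_actions[] = {
    	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, .conf = &vxlan_conf },
    	{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &mirror_port },
    	{ .type = RTE_FLOW_ACTION_TYPE_END },
    };

    static const struct rte_flow_action_sample mirror_conf = {
    	.ratio = 1,	/* 1 == mirror every matched packet */
    	.actions = sample_sub_actions,
    };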
@@ -5238,7 +5973,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t priority_max = priv->config.flow_prio - 1;
+	uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
 	int ret = 0;
 
 #ifndef HAVE_MLX5DV_DR
@@ -5259,8 +5994,8 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
 	if (!table)
 		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
 #endif
-	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
-	    attributes->priority >= priority_max)
+	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
+	    attributes->priority > lowest_priority)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 					  NULL,
@@ -5325,6 +6060,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	uint16_t ether_type = 0;
 	int actions_n = 0;
 	uint8_t item_ipv6_proto = 0;
+	int fdb_mirror_limit = 0;
+	int modify_after_mirror = 0;
 	const struct rte_flow_item *geneve_item = NULL;
 	const struct rte_flow_item *gre_item = NULL;
 	const struct rte_flow_item *gtp_item = NULL;
@@ -5332,6 +6069,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	const struct rte_flow_action_raw_encap *encap;
 	const struct rte_flow_action_rss *rss = NULL;
 	const struct rte_flow_action_rss *sample_rss = NULL;
+	const struct rte_flow_action_count *count = NULL;
+	const struct rte_flow_action_count *sample_count = NULL;
 	const struct rte_flow_item_tcp nic_tcp_mask = {
 		.hdr = {
 			.tcp_flags = 0xFF,
@@ -5368,7 +6107,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	struct mlx5_dev_config *dev_conf = &priv->config;
 	uint16_t queue_index = 0xFFFF;
 	const struct rte_flow_item_vlan *vlan_m = NULL;
-	int16_t rw_act_num = 0;
+	uint32_t rw_act_num = 0;
 	uint64_t is_root;
 	const struct mlx5_flow_tunnel *tunnel;
 	struct flow_grp_info grp_info = {
@@ -5744,6 +6483,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				++actions_n;
 				action_flags |= MLX5_FLOW_ACTION_FLAG |
 						MLX5_FLOW_ACTION_MARK_EXT;
+				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+					modify_after_mirror = 1;
+
 			} else {
 				action_flags |= MLX5_FLOW_ACTION_FLAG;
 				++actions_n;
@@ -5763,6 +6505,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				++actions_n;
 				action_flags |= MLX5_FLOW_ACTION_MARK |
 						MLX5_FLOW_ACTION_MARK_EXT;
+				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+					modify_after_mirror = 1;
 			} else {
 				action_flags |= MLX5_FLOW_ACTION_MARK;
 				++actions_n;
@@ -5778,6 +6522,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= MLX5_FLOW_ACTION_SET_META;
 			rw_act_num += MLX5_ACT_NUM_SET_META;
 			break;
@@ -5790,6 +6536,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
 			rw_act_num += MLX5_ACT_NUM_SET_TAG;
 			break;
@@ -5843,9 +6591,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			++actions_n;
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
-			ret = flow_dv_validate_action_count(dev, error);
+			ret = flow_dv_validate_action_count(dev, actions,
+							    action_flags,
+							    error);
 			if (ret < 0)
 				return ret;
+			count = actions->conf;
 			action_flags |= MLX5_FLOW_ACTION_COUNT;
 			++actions_n;
 			break;
@@ -5949,6 +6700,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
 					MLX5_FLOW_ACTION_SET_MAC_SRC :
 					MLX5_FLOW_ACTION_SET_MAC_DST;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			/*
 			 * Even if the source and destination MAC addresses have
 			 * overlap in the header with 4B alignment, the convert
@@ -5969,6 +6722,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
 					MLX5_FLOW_ACTION_SET_IPV4_SRC :
@@ -5992,6 +6747,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
 					MLX5_FLOW_ACTION_SET_IPV6_SRC :
@@ -6009,6 +6766,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
 					MLX5_FLOW_ACTION_SET_TP_SRC :
@@ -6026,6 +6785,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_SET_TTL ?
 					MLX5_FLOW_ACTION_SET_TTL :
@@ -6039,6 +6800,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 							   error);
 			if (ret)
 				return ret;
+			if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
+			    fdb_mirror_limit)
+				return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  NULL,
+						  "sample and jump action combination is not supported");
 			++actions_n;
 			action_flags |= MLX5_FLOW_ACTION_JUMP;
 			break;
@@ -6054,6 +6821,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
 					MLX5_FLOW_ACTION_INC_TCP_SEQ :
@@ -6072,6 +6841,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
 					MLX5_FLOW_ACTION_INC_TCP_ACK :
@@ -6111,6 +6882,24 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 							     error);
 			if (ret < 0)
 				return ret;
+			/*
+			 * Validate the regular AGE action (using counter)
+			 * mutual exclusion with shared counter actions.
+			 */
+			if (!priv->sh->flow_hit_aso_en) {
+				if (count && count->shared)
+					return rte_flow_error_set
+						(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"old age and shared count combination is not supported");
+				if (sample_count)
+					return rte_flow_error_set
+						(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"old age action and count must be in the same sub flow");
+			}
 			action_flags |= MLX5_FLOW_ACTION_AGE;
 			++actions_n;
 			break;
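A sketch of the combination the new checks above reject on devices without ASO flow-hit support: AGE together with a shared COUNT in one flow. A private (non-shared) counter remains acceptable, since aging can then piggyback on it. Structure names are from rte_flow; the action list is a fragment (fate action omitted):

    #include <rte_flow.h>

    static const struct rte_flow_action_count shared_cnt = {
    	.shared = 1,	/* shared across flows */
    	.id = 0,
    };

    static const struct rte_flow_action_age age_conf = {
    	.timeout = 10,	/* seconds */
    };

    /* Rejected when flow-hit ASO is unavailable:
     * "old age and shared count combination is not supported". */
    static const struct rte_flow_action age_with_shared_count[] = {
    	{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &shared_cnt },
    	{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
    	{ .type = RTE_FLOW_ACTION_TYPE_END },
    };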
@@ -6125,6 +6914,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
 			rw_act_num += MLX5_ACT_NUM_SET_DSCP;
 			break;
@@ -6139,14 +6930,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			/* Count all modify-header actions as one action. */
 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 				++actions_n;
+			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+				modify_after_mirror = 1;
 			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
 			rw_act_num += MLX5_ACT_NUM_SET_DSCP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
-			ret = flow_dv_validate_action_sample(action_flags,
+			ret = flow_dv_validate_action_sample(&action_flags,
 							     actions, dev,
 							     attr, item_flags,
 							     rss, &sample_rss,
+							     &sample_count,
+							     &fdb_mirror_limit,
 							     error);
 			if (ret < 0)
 				return ret;
@@ -6164,6 +6959,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
 			break;
+		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+			ret = flow_dv_validate_action_modify_field(dev,
+								   action_flags,
+								   actions,
+								   attr,
+								   error);
+			if (ret < 0)
+				return ret;
+			/* Count all modify-header actions as one action. */
+			if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
+				++actions_n;
+			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+			rw_act_num += ret;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -6223,8 +7032,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	 * Validate the drop action mutual exclusion with other actions.
 	 * Drop action is mutually-exclusive with any other action, except for
 	 * Count action.
+	 * Drop action compatibility with tunnel offload was already validated.
 	 */
-	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
+	if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
+			    MLX5_FLOW_ACTION_TUNNEL_SET));
+	else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
 	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -6318,13 +7130,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	    dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
 	    mlx5_flow_ext_mreg_supported(dev))
 		rw_act_num += MLX5_ACT_NUM_SET_TAG;
-	if ((uint32_t)rw_act_num >
+	if (rw_act_num >
 			flow_dv_modify_hdr_action_max(dev, is_root)) {
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  NULL, "too many header modify"
 					  " actions to support");
 	}
+	/* E-Switch egress mirror and modify flow has a limitation on CX5 */
+	if (fdb_mirror_limit && modify_after_mirror)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+				"sample before modify action is not supported");
 	return 0;
 }
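Ordering sketch for the CX5 limitation enforced above (illustrative; the mirror's sub-action list is elided): when register C cannot be preserved across the E-Switch mirror, any header modification must precede the SAMPLE action. The following order passes validation, while swapping the first two actions fails with "sample before modify action is not supported":

    #include <rte_flow.h>

    static const struct rte_flow_action_set_ttl new_ttl = { .ttl_value = 64 };
    static const struct rte_flow_action_sample mirror_conf = {
    	.ratio = 1,	/* mirror sub-action list elided in this sketch */
    };
    static const struct rte_flow_action_port_id fate_port = { .id = 2 };

    static const struct rte_flow_action modify_then_mirror[] = {
    	{ .type = RTE_FLOW_ACTION_TYPE_SET_TTL, .conf = &new_ttl },
    	{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &mirror_conf },
    	{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &fate_port },
    	{ .type = RTE_FLOW_ACTION_TYPE_END },
    };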
@@ -7956,11 +8773,15 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
 		    priv->pf_bond < 0 && attr->transfer)
 			flow_dv_translate_item_source_vport
 				(matcher, key, priv->vport_id, mask);
-		else
-			flow_dv_translate_item_meta_vport
-				(matcher, key,
-				 priv->vport_meta_tag,
-				 priv->vport_meta_mask);
+		/*
+		 * We should always set the vport metadata register,
+		 * otherwise the SW steering library can drop
+		 * the rule if wire vport metadata value is not zero,
+		 * it depends on kernel configuration.
+		 */
+		flow_dv_translate_item_meta_vport(matcher, key,
+						  priv->vport_meta_tag,
+						  priv->vport_meta_mask);
 	} else {
 		flow_dv_translate_item_source_vport(matcher, key,
 						    priv->vport_id, mask);
@@ -9056,10 +9877,6 @@ flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
 		flow_dv_tag_release(dev, act_res->rix_tag);
 		act_res->rix_tag = 0;
 	}
-	if (act_res->cnt) {
-		flow_dv_counter_free(dev, act_res->cnt);
-		act_res->cnt = 0;
-	}
 	if (act_res->rix_jump) {
 		flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
 		act_res->rix_jump = 0;
@@ -9141,22 +9958,18 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
 				   "for sample");
 		goto error;
 	}
-	int ret;
-
 	cache_resource->normal_path_tbl = tbl;
 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
-		ret = mlx5_flow_os_create_flow_action_default_miss
-			(&cache_resource->default_miss);
-		if (!ret) {
+		if (!sh->default_miss_action) {
 			rte_flow_error_set(error, ENOMEM,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
-					   "cannot create default miss "
-					   "action");
+					   "default miss action was not "
+					   "created");
 			goto error;
 		}
 		sample_dv_actions[resource->sample_act.actions_num++] =
-					cache_resource->default_miss;
+					sh->default_miss_action;
 	}
 	/* Create a DR sample action */
 	sampler_attr.sample_ratio = cache_resource->ratio;
@@ -9176,11 +9989,7 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
 	cache_resource->dev = dev;
 	return &cache_resource->entry;
 error:
-	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
-	    cache_resource->default_miss)
-		claim_zero(mlx5_flow_os_destroy_flow_action
-				(cache_resource->default_miss));
-	else
+	if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
 		flow_dv_sample_sub_actions_release(dev,
 						   &cache_resource->sample_idx);
 	if (cache_resource->normal_path_tbl)
@@ -9460,6 +10269,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 	struct mlx5_flow_sub_actions_list *sample_act;
 	struct mlx5_flow_sub_actions_idx *sample_idx;
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+	struct rte_flow *flow = dev_flow->flow;
 	struct mlx5_flow_rss_desc *rss_desc;
 	uint64_t action_flags = 0;
 
@@ -9570,21 +10380,22 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 		}
 		case RTE_FLOW_ACTION_TYPE_COUNT:
 		{
-			uint32_t counter;
-
-			counter = flow_dv_translate_create_counter(dev,
-					dev_flow, sub_actions->conf, 0);
-			if (!counter)
-				return rte_flow_error_set
+			if (!flow->counter) {
+				flow->counter =
+					flow_dv_translate_create_counter(dev,
+						dev_flow, sub_actions->conf,
+						0);
+				if (!flow->counter)
+					return rte_flow_error_set
 						(error, rte_errno,
-						RTE_FLOW_ERROR_TYPE_ACTION,
-						NULL,
-						"cannot create counter"
-						" object.");
-			sample_idx->cnt = counter;
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 NULL,
+						 "cannot create counter"
+						 " object.");
+			}
 			sample_act->dr_cnt_action =
 				  (flow_dv_counter_get_by_idx(dev,
-				  counter, NULL))->action;
+				  flow->counter, NULL))->action;
 			sample_actions[sample_act->actions_num++] =
 						sample_act->dr_cnt_action;
 			action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -9620,6 +10431,8 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 			break;
 		}
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
 			/* Save the encap resource before sample */
 			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
@@ -10085,7 +10898,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint64_t action_flags = 0;
-	uint64_t priority = attr->priority;
 	struct mlx5_flow_dv_matcher matcher = {
 		.mask = {
 			.size = sizeof(matcher.mask.buf) -
@@ -10158,8 +10970,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	dev_flow->dv.group = table;
 	if (attr->transfer)
 		mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
-	if (priority == MLX5_FLOW_PRIO_RSVD)
-		priority = dev_conf->flow_prio - 1;
 	/* number of actions must be set to 0 in case of dirty stack. */
 	mhdr_res->actions_num = 0;
 	if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
@@ -10347,14 +11157,22 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ACTION_TYPE_AGE:
 			if (priv->sh->flow_hit_aso_en && attr->group) {
-				flow->age = flow_dv_translate_create_aso_age
-						(dev, action->conf, error);
-				if (!flow->age)
-					return rte_flow_error_set
+				/*
+				 * Create one shared age action, to be used
+				 * by all sub-flows.
+				 */
+				if (!flow->age) {
+					flow->age =
+						flow_dv_translate_create_aso_age
+							(dev, action->conf,
+							 error);
+					if (!flow->age)
+						return rte_flow_error_set
 						(error, rte_errno,
 						 RTE_FLOW_ERROR_TYPE_ACTION,
 						 NULL,
 						 "can't create ASO age action");
+				}
 				dev_flow->dv.actions[actions_n++] =
 					  (flow_aso_age_get_by_idx
 						(dev, flow->age))->dr_action;
@@ -10673,6 +11491,12 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			sample_act->action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
+		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+			if (flow_dv_convert_action_modify_field
+					(dev, mhdr_res, actions, attr, error))
+				return -rte_errno;
+			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+			break;
 		case RTE_FLOW_ACTION_TYPE_END:
 			actions_end = true;
 			if (mhdr_res->actions_num) {
@@ -10684,17 +11508,22 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					handle->dvh.modify_hdr->action;
 			}
 			if (action_flags & MLX5_FLOW_ACTION_COUNT) {
-				flow->counter =
-					flow_dv_translate_create_counter(dev,
-						dev_flow, count, age);
-
-				if (!flow->counter)
-					return rte_flow_error_set
+				/*
+				 * Create one count action, to be used
+				 * by all sub-flows.
+				 */
+				if (!flow->counter) {
+					flow->counter =
+						flow_dv_translate_create_counter
+							(dev, dev_flow, count,
+							 age);
+					if (!flow->counter)
+						return rte_flow_error_set
 						(error, rte_errno,
-						RTE_FLOW_ERROR_TYPE_ACTION,
-						NULL,
-						"cannot create counter"
-						" object.");
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 NULL, "cannot create counter"
+						 " object.");
+				}
 				dev_flow->dv.actions[actions_n] =
 					  (flow_dv_counter_get_by_idx(dev,
 					   flow->counter, NULL))->action;
@@ -11048,9 +11877,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	/* Register matcher. */
 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 				    matcher.mask.size);
-	matcher.priority = mlx5_os_flow_adjust_priority(dev,
-							priority,
-							matcher.priority);
+	matcher.priority = mlx5_get_matcher_priority(dev, attr,
+						     matcher.priority);
 	/* reserved field no needs to be set to 0 here. */
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
@@ -11080,28 +11908,51 @@ flow_dv_translate(struct rte_eth_dev *dev,
 static int
 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
 			      const uint64_t hash_fields,
-			      const int tunnel,
 			      uint32_t hrxq_idx)
 {
-	uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+	uint32_t *hrxqs = action->hrxq;
 
 	switch (hash_fields & ~IBV_RX_HASH_INNER) {
 	case MLX5_RSS_HASH_IPV4:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_SRC_ONLY:
 		hrxqs[0] = hrxq_idx;
 		return 0;
 	case MLX5_RSS_HASH_IPV4_TCP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
 		hrxqs[1] = hrxq_idx;
 		return 0;
	case MLX5_RSS_HASH_IPV4_UDP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
 		hrxqs[2] = hrxq_idx;
 		return 0;
 	case MLX5_RSS_HASH_IPV6:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_SRC_ONLY:
 		hrxqs[3] = hrxq_idx;
 		return 0;
 	case MLX5_RSS_HASH_IPV6_TCP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
 		hrxqs[4] = hrxq_idx;
 		return 0;
 	case MLX5_RSS_HASH_IPV6_UDP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
 		hrxqs[5] = hrxq_idx;
 		return 0;
 	case MLX5_RSS_HASH_NONE:
@@ -11130,33 +11981,56 @@ __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
  */
 static uint32_t
 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
-				 const uint64_t hash_fields,
-				 const int tunnel)
+				 const uint64_t hash_fields)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
-	const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
-							shared_rss->hrxq_tunnel;
+	const uint32_t *hrxqs = shared_rss->hrxq;
 
 	switch (hash_fields & ~IBV_RX_HASH_INNER) {
 	case MLX5_RSS_HASH_IPV4:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_SRC_ONLY:
 		return hrxqs[0];
 	case MLX5_RSS_HASH_IPV4_TCP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
 		return hrxqs[1];
 	case MLX5_RSS_HASH_IPV4_UDP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
 		return hrxqs[2];
 	case MLX5_RSS_HASH_IPV6:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_SRC_ONLY:
 		return hrxqs[3];
 	case MLX5_RSS_HASH_IPV6_TCP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
 		return hrxqs[4];
 	case MLX5_RSS_HASH_IPV6_UDP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
 		return hrxqs[5];
 	case MLX5_RSS_HASH_NONE:
 		return hrxqs[6];
 	default:
 		return 0;
 	}
+
 }
 
 /**
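The lookup above collapses the SRC_ONLY/DST_ONLY variants of each L3/L4 combination into one pre-created hrxq slot. Condensed, the mapping is (a restatement of the switch, with the variants folded into their base cases):

    /* Slot index into shared_rss->hrxq[]; the *_SRC_ONLY and *_DST_ONLY
     * variants fall through to the same slot as their base combination. */
    static inline int
    shared_rss_hrxq_slot(uint64_t hash_fields)
    {
    	switch (hash_fields & ~IBV_RX_HASH_INNER) {
    	case MLX5_RSS_HASH_IPV4:	return 0;
    	case MLX5_RSS_HASH_IPV4_TCP:	return 1;
    	case MLX5_RSS_HASH_IPV4_UDP:	return 2;
    	case MLX5_RSS_HASH_IPV6:	return 3;
    	case MLX5_RSS_HASH_IPV6_TCP:	return 4;
    	case MLX5_RSS_HASH_IPV6_UDP:	return 5;
    	case MLX5_RSS_HASH_NONE:	return 6;
    	default:			return -1;
    	}
    }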
@@ -11198,11 +12072,19 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		n = dv->actions_n;
 		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
 			if (dv->transfer) {
-				dv->actions[n++] = priv->sh->esw_drop_action;
+				MLX5_ASSERT(priv->sh->dr_drop_action);
+				dv->actions[n++] = priv->sh->dr_drop_action;
 			} else {
+#ifdef HAVE_MLX5DV_DR
+				/* DR supports drop action placeholder. */
+				MLX5_ASSERT(priv->sh->dr_drop_action);
+				dv->actions[n++] = priv->sh->dr_drop_action;
+#else
+				/* For DV we use the explicit drop queue. */
 				MLX5_ASSERT(priv->drop_queue.hrxq);
 				dv->actions[n++] =
 						priv->drop_queue.hrxq->action;
+#endif
 			}
 		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
 			   !dv_h->rix_sample && !dv_h->rix_dest_array)) {
@@ -11226,9 +12108,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 			hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
 					rss_desc->shared_rss,
-					dev_flow->hash_fields,
-					!!(dh->layers &
-					   MLX5_FLOW_LAYER_TUNNEL));
+					dev_flow->hash_fields);
 			if (hrxq_idx)
 				hrxq = mlx5_ipool_get
 					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
@@ -11574,11 +12454,6 @@ flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
 	if (cache_resource->verbs_action)
 		claim_zero(mlx5_flow_os_destroy_flow_action
 				(cache_resource->verbs_action));
-	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
-		if (cache_resource->default_miss)
-			claim_zero(mlx5_flow_os_destroy_flow_action
-				(cache_resource->default_miss));
-	}
 	if (cache_resource->normal_path_tbl)
 		flow_dv_tbl_resource_release(MLX5_SH(dev),
 			cache_resource->normal_path_tbl);
@@ -11842,10 +12717,87 @@ __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
  */
 static int
 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
-				 struct mlx5_shared_action_rss *action)
+				 struct mlx5_shared_action_rss *shared_rss)
 {
-	return __flow_dv_hrxqs_release(dev, &action->hrxq) +
-		__flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
+	return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
+}
+
+/**
+ * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
+ * user input.
+ *
+ * Only one hash value is available for one L3+L4 combination:
+ * for example:
+ * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
+ * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
+ * same slot in mlx5_rss_hash_fields.
+ *
+ * @param[in] rss
+ *   Pointer to the shared action RSS conf.
+ * @param[in, out] hash_field
+ *   hash_field variable needed to be adjusted.
+ *
+ * @return
+ *   void
+ */
+static void
+__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
+				     uint64_t *hash_field)
+{
+	uint64_t rss_types = rss->origin.types;
+
+	switch (*hash_field & ~IBV_RX_HASH_INNER) {
+	case MLX5_RSS_HASH_IPV4:
+		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+			*hash_field &= ~MLX5_RSS_HASH_IPV4;
+			if (rss_types & ETH_RSS_L3_DST_ONLY)
+				*hash_field |= IBV_RX_HASH_DST_IPV4;
+			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+				*hash_field |= IBV_RX_HASH_SRC_IPV4;
+			else
+				*hash_field |= MLX5_RSS_HASH_IPV4;
+		}
+		return;
+	case MLX5_RSS_HASH_IPV6:
+		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+			*hash_field &= ~MLX5_RSS_HASH_IPV6;
+			if (rss_types & ETH_RSS_L3_DST_ONLY)
+				*hash_field |= IBV_RX_HASH_DST_IPV6;
+			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+				*hash_field |= IBV_RX_HASH_SRC_IPV6;
+			else
+				*hash_field |= MLX5_RSS_HASH_IPV6;
+		}
+		return;
+	case MLX5_RSS_HASH_IPV4_UDP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_UDP:
+		if (rss_types & ETH_RSS_UDP) {
+			*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
+			if (rss_types & ETH_RSS_L4_DST_ONLY)
+				*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
+			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+				*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
+			else
+				*hash_field |= MLX5_UDP_IBV_RX_HASH;
+		}
+		return;
+	case MLX5_RSS_HASH_IPV4_TCP:
+		/* fall-through. */
+	case MLX5_RSS_HASH_IPV6_TCP:
+		if (rss_types & ETH_RSS_TCP) {
+			*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
+			if (rss_types & ETH_RSS_L4_DST_ONLY)
+				*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
+			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+				*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
+			else
+				*hash_field |= MLX5_TCP_IBV_RX_HASH;
+		}
+		return;
+	default:
+		return;
+	}
+}
 
 /**
@@ -11869,52 +12821,55 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
 static int
 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
 			   uint32_t action_idx,
-			   struct mlx5_shared_action_rss *action,
+			   struct mlx5_shared_action_rss *shared_rss,
 			   struct rte_flow_error *error)
 {
 	struct mlx5_flow_rss_desc rss_desc = { 0 };
 	size_t i;
 	int err;
 
-	if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) {
+	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
 		return rte_flow_error_set(error, rte_errno,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot setup indirection table");
 	}
-	memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
+	memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
 	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
-	rss_desc.const_q = action->origin.queue;
-	rss_desc.queue_num = action->origin.queue_num;
+	rss_desc.const_q = shared_rss->origin.queue;
+	rss_desc.queue_num = shared_rss->origin.queue_num;
 	/* Set non-zero value to indicate a shared RSS. */
 	rss_desc.shared_rss = action_idx;
-	rss_desc.ind_tbl = action->ind_tbl;
+	rss_desc.ind_tbl = shared_rss->ind_tbl;
 	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
 		uint32_t hrxq_idx;
 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
-		int tunnel;
+		int tunnel = 0;
 
-		for (tunnel = 0; tunnel < 2; tunnel++) {
-			rss_desc.tunnel = tunnel;
-			rss_desc.hash_fields = hash_fields;
-			hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
-			if (!hrxq_idx) {
-				rte_flow_error_set
-					(error, rte_errno,
-					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-					 "cannot get hash queue");
-				goto error_hrxq_new;
-			}
-			err = __flow_dv_action_rss_hrxq_set
-				(action, hash_fields, tunnel, hrxq_idx);
-			MLX5_ASSERT(!err);
-		}
+		__flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
+		if (shared_rss->origin.level > 1) {
+			hash_fields |= IBV_RX_HASH_INNER;
+			tunnel = 1;
+		}
+		rss_desc.tunnel = tunnel;
+		rss_desc.hash_fields = hash_fields;
+		hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
+		if (!hrxq_idx) {
+			rte_flow_error_set
+				(error, rte_errno,
+				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				 "cannot get hash queue");
+			goto error_hrxq_new;
+		}
+		err = __flow_dv_action_rss_hrxq_set
+			(shared_rss, hash_fields, hrxq_idx);
+		MLX5_ASSERT(!err);
 	}
 	return 0;
 error_hrxq_new:
 	err = rte_errno;
-	__flow_dv_action_rss_hrxqs_release(dev, action);
-	if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true))
-		action->ind_tbl = NULL;
+	__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
+		shared_rss->ind_tbl = NULL;
 	rte_errno = err;
 	return -rte_errno;
 }
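A usage sketch of the shared RSS path this rework serves (API names from rte_flow as of this release; the `conf` layout is an assumption): creating a shared RSS action with L3-source-only hashing, which `__flow_dv_action_rss_l34_hash_adjust()` above maps onto the single IPv4 slot as IBV_RX_HASH_SRC_IPV4:

    #include <rte_ethdev.h>
    #include <rte_flow.h>

    static const uint16_t rss_queues[] = { 0, 1, 2, 3 };

    static const struct rte_flow_action_rss rss_conf = {
    	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
    	.level = 1,	/* outer headers only */
    	.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY,
    	.queue = rss_queues,
    	.queue_num = RTE_DIM(rss_queues),
    };

    static struct rte_flow_shared_action *
    create_src_only_rss(uint16_t port_id, struct rte_flow_error *err)
    {
    	static const struct rte_flow_shared_action_conf conf = {
    		.ingress = 1,
    	};
    	const struct rte_flow_action action = {
    		.type = RTE_FLOW_ACTION_TYPE_RSS,
    		.conf = &rss_conf,
    	};

    	return rte_flow_shared_action_create(port_id, &conf, &action, err);
    }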
@@ -11943,7 +12898,7 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_shared_action_rss *shared_action = NULL;
+	struct mlx5_shared_action_rss *shared_rss = NULL;
 	void *queue = NULL;
 	struct rte_flow_action_rss *origin;
 	const uint8_t *rss_key;
@@ -11953,9 +12908,9 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
 	RTE_SET_USED(conf);
 	queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
 			    0, SOCKET_ID_ANY);
-	shared_action = mlx5_ipool_zmalloc
+	shared_rss = mlx5_ipool_zmalloc
 		(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
-	if (!shared_action || !queue) {
+	if (!shared_rss || !queue) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				   "cannot allocate resource memory");
@@ -11967,43 +12922,43 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
 				   "rss action number out of range");
 		goto error_rss_init;
 	}
-	shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
-					     sizeof(*shared_action->ind_tbl),
-					     0, SOCKET_ID_ANY);
-	if (!shared_action->ind_tbl) {
+	shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
+					  sizeof(*shared_rss->ind_tbl),
+					  0, SOCKET_ID_ANY);
+	if (!shared_rss->ind_tbl) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				   "cannot allocate resource memory");
 		goto error_rss_init;
 	}
 	memcpy(queue, rss->queue, queue_size);
-	shared_action->ind_tbl->queues = queue;
-	shared_action->ind_tbl->queues_n = rss->queue_num;
-	origin = &shared_action->origin;
+	shared_rss->ind_tbl->queues = queue;
+	shared_rss->ind_tbl->queues_n = rss->queue_num;
+	origin = &shared_rss->origin;
 	origin->func = rss->func;
 	origin->level = rss->level;
 	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
 	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
-	memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
-	origin->key = &shared_action->key[0];
+	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+	origin->key = &shared_rss->key[0];
 	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
 	origin->queue = queue;
 	origin->queue_num = rss->queue_num;
-	if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
+	if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
 		goto error_rss_init;
-	rte_spinlock_init(&shared_action->action_rss_sl);
-	__atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
+	rte_spinlock_init(&shared_rss->action_rss_sl);
+	__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
 	rte_spinlock_lock(&priv->shared_act_sl);
 	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
-		     &priv->rss_shared_actions, idx, shared_action, next);
+		     &priv->rss_shared_actions, idx, shared_rss, next);
 	rte_spinlock_unlock(&priv->shared_act_sl);
 	return idx;
 error_rss_init:
-	if (shared_action) {
-		if (shared_action->ind_tbl)
-			mlx5_free(shared_action->ind_tbl);
+	if (shared_rss) {
+		if (shared_rss->ind_tbl)
+			mlx5_free(shared_rss->ind_tbl);
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
 				idx);
 	}
@@ -12048,6 +13003,13 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  NULL,
 					  "shared rss hrxq has references");
+	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
+					 0, 0, __ATOMIC_ACQUIRE,
+					 __ATOMIC_RELAXED))
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  NULL,
+					  "shared rss has references");
 	queue = shared_rss->ind_tbl->queues;
 	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
 	if (remaining)
@@ -12056,13 +13018,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
 					  NULL,
 					  "shared rss indirection table has"
 					  " references");
-	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
-					 0, 0, __ATOMIC_ACQUIRE,
-					 __ATOMIC_RELAXED))
-		return rte_flow_error_set(error, EBUSY,
-					  RTE_FLOW_ERROR_TYPE_ACTION,
-					  NULL,
-					  "shared rss has references");
 	mlx5_free(queue);
 	rte_spinlock_lock(&priv->shared_act_sl);
 	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
@@ -12210,6 +13165,10 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "invalid shared action to update");
+	if (priv->obj_ops.ind_table_modify == NULL)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "cannot modify indirection table");
 	queue = mlx5_malloc(MLX5_MEM_ZERO,
 			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
 			    0, SOCKET_ID_ANY);
@@ -12911,7 +13870,8 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
 					    &actions[0]);
 	if (ret)
 		goto err;
-	actions[1] = priv->drop_queue.hrxq->action;
+	actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
+					  priv->drop_queue.hrxq->action;
 	dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
 					       &matcher);
@@ -13092,6 +14052,20 @@ flow_dv_action_validate(struct rte_eth_dev *dev,
 	RTE_SET_USED(conf);
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_RSS:
+		/*
+		 * priv->obj_ops is set according to driver capabilities.
+		 * When DevX capabilities are sufficient, it is set to
+		 * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
+		 * ibv_obj_ops doesn't support ind_table_modify operation.
+		 * In this case the shared RSS action can't be used.
+		 */
+		if (priv->obj_ops.ind_table_modify == NULL)
+			return rte_flow_error_set
+					(err, ENOTSUP,
+					 RTE_FLOW_ERROR_TYPE_ACTION,
+					 NULL,
+					 "shared RSS action not supported");
 		return mlx5_validate_action_rss(dev, action, err);
 	case RTE_FLOW_ACTION_TYPE_AGE:
 		if (!priv->sh->aso_age_mng)