diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index cb3d3580a6..62edc4fa85 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -426,6 +426,8 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 unsigned int off_b;
 uint32_t mask;
 uint32_t data;
+ bool next_field = true;
+ bool next_dcopy = true;
 
 if (i >= MLX5_MAX_MODIFY_NUM)
 return rte_flow_error_set(error, EINVAL,
@@ -443,15 +445,13 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 size_b = sizeof(uint32_t) * CHAR_BIT - off_b -
 __builtin_clz(mask);
 MLX5_ASSERT(size_b);
- size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
 actions[i] = (struct mlx5_modification_cmd) {
 .action_type = type,
 .field = field->id,
 .offset = off_b,
- .length = size_b,
+ .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
+ 0 : size_b,
 };
- /* Convert entire record to expected big-endian format. */
- actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
 if (type == MLX5_MODIFICATION_TYPE_COPY) {
 MLX5_ASSERT(dcopy);
 actions[i].dst_field = dcopy->id;
@@ -459,7 +459,27 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
 /* Convert entire record to big-endian format. */
 actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
- ++dcopy;
+ /*
+ * Destination field overflow. Copy leftovers of
+ * a source field to the next destination field.
+ */
+ if ((size_b > dcopy->size * CHAR_BIT) && dcopy->size) {
+ actions[i].length = dcopy->size * CHAR_BIT;
+ field->offset += dcopy->size;
+ next_field = false;
+ }
+ /*
+ * Not enough bits in a source field to fill a
+ * destination field. Switch to the next source.
+ */
+ if (dcopy->size > field->size &&
+ (size_b == field->size * CHAR_BIT)) {
+ actions[i].length = field->size * CHAR_BIT;
+ dcopy->offset += field->size * CHAR_BIT;
+ next_dcopy = false;
+ }
+ if (next_dcopy)
+ ++dcopy;
 } else {
 MLX5_ASSERT(item->spec);
 data = flow_dv_fetch_field((const uint8_t *)item->spec +
@@ -468,8 +488,11 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 data = (data & mask) >> off_b;
 actions[i].data1 = rte_cpu_to_be_32(data);
 }
+ /* Convert entire record to expected big-endian format. */
+ actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+ if (next_field)
+ ++field;
 ++i;
- ++field;
 } while (field->size);
 if (resource->actions_num == i)
 return rte_flow_error_set(error, EINVAL,
@@ -1239,8 +1262,8 @@ flow_dv_convert_action_set_meta
 const struct rte_flow_action_set_meta *conf,
 struct rte_flow_error *error)
 {
- uint32_t data = conf->data;
- uint32_t mask = conf->mask;
+ uint32_t mask = rte_cpu_to_be_32(conf->mask);
+ uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
 struct rte_flow_item item = {
 .spec = &data,
 .mask = &mask,
@@ -1253,25 +1276,14 @@ flow_dv_convert_action_set_meta
 if (reg < 0)
 return reg;
 MLX5_ASSERT(reg != REG_NON);
- /*
- * In datapath code there is no endianness
- * coversions for perfromance reasons, all
- * pattern conversions are done in rte_flow.
- */
 if (reg == REG_C_0) {
 struct mlx5_priv *priv = dev->data->dev_private;
 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
- uint32_t shl_c0;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
 
- MLX5_ASSERT(msk_c0);
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- shl_c0 = rte_bsf32(msk_c0);
-#else
- shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
-#endif
- mask <<= shl_c0;
- data <<= shl_c0;
- MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
+ data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
+ mask = rte_cpu_to_be_32(mask) & msk_c0;
+ mask = rte_cpu_to_be_32(mask << shl_c0);
 }
 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
 /* The routine expects parameters in memory as big-endian ones. */
@@ -1356,7 +1368,8 @@ flow_dv_convert_action_modify_ipv6_dscp
 }
 
 static int
-mlx5_flow_item_field_width(enum rte_flow_field_id field)
+mlx5_flow_item_field_width(struct mlx5_dev_config *config,
+ enum rte_flow_field_id field)
 {
 switch (field) {
 case RTE_FLOW_FIELD_START:
@@ -1404,7 +1417,12 @@ mlx5_flow_item_field_width(enum rte_flow_field_id field)
 case RTE_FLOW_FIELD_MARK:
 return 24;
 case RTE_FLOW_FIELD_META:
- return 32;
+ if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
+ return 16;
+ else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
+ return 32;
+ else
+ return 0;
 case RTE_FLOW_FIELD_POINTER:
 case RTE_FLOW_FIELD_VALUE:
 return 64;
@@ -1424,7 +1442,10 @@ mlx5_flow_field_id_to_modify_info
 const struct rte_flow_attr *attr,
 struct rte_flow_error *error)
 {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
 uint32_t idx = 0;
+ uint32_t off = 0;
 uint64_t val = 0;
 switch (data->field) {
 case RTE_FLOW_FIELD_START:
@@ -1432,61 +1453,63 @@ mlx5_flow_field_id_to_modify_info
 MLX5_ASSERT(false);
 break;
 case RTE_FLOW_FIELD_MAC_DST:
+ off = data->offset > 16 ? data->offset - 16 : 0;
 if (mask) {
- if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DMAC_47_16};
- if (width < 32) {
- mask[idx] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ if (data->offset < 16) {
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_DMAC_15_0};
+ if (width < 16) {
+ mask[idx] = rte_cpu_to_be_16(0xffff >>
+ (16 - width));
 width = 0;
 } else {
- mask[idx] = RTE_BE32(0xffffffff);
- width -= 32;
+ mask[idx] = RTE_BE16(0xffff);
+ width -= 16;
 }
 if (!width)
 break;
 ++idx;
 }
- info[idx] = (struct field_modify_info){2, 4 * idx,
- MLX5_MODI_OUT_DMAC_15_0};
- mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
- } else {
- if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx] = (struct field_modify_info){4, 4 * idx,
 MLX5_MODI_OUT_DMAC_47_16};
- info[idx] = (struct field_modify_info){2, 0,
+ mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
+ } else {
+ if (data->offset < 16)
+ info[idx++] = (struct field_modify_info){2, 0,
 MLX5_MODI_OUT_DMAC_15_0};
+ info[idx] = (struct field_modify_info){4, off,
+ MLX5_MODI_OUT_DMAC_47_16};
 }
 break;
 case RTE_FLOW_FIELD_MAC_SRC:
+ off = data->offset > 16 ? data->offset - 16 : 0;
 if (mask) {
- if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SMAC_47_16};
- if (width < 32) {
- mask[idx] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ if (data->offset < 16) {
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_SMAC_15_0};
+ if (width < 16) {
+ mask[idx] = rte_cpu_to_be_16(0xffff >>
+ (16 - width));
 width = 0;
 } else {
- mask[idx] = RTE_BE32(0xffffffff);
- width -= 32;
+ mask[idx] = RTE_BE16(0xffff);
+ width -= 16;
 }
 if (!width)
 break;
 ++idx;
 }
- info[idx] = (struct field_modify_info){2, 4 * idx,
- MLX5_MODI_OUT_SMAC_15_0};
- mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
- } else {
- if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx] = (struct field_modify_info){4, 4 * idx,
 MLX5_MODI_OUT_SMAC_47_16};
- info[idx] = (struct field_modify_info){2, 0,
+ mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
+ } else {
+ if (data->offset < 16)
+ info[idx++] = (struct field_modify_info){2, 0,
 MLX5_MODI_OUT_SMAC_15_0};
+ info[idx] = (struct field_modify_info){4, off,
+ MLX5_MODI_OUT_SMAC_47_16};
 }
 break;
 case RTE_FLOW_FIELD_VLAN_TYPE:
@@ -1777,17 +1800,28 @@ mlx5_flow_field_id_to_modify_info
 break;
 case RTE_FLOW_FIELD_META:
 {
+ unsigned int xmeta = config->dv_xmeta_en;
 int reg = flow_dv_get_metadata_reg(dev, attr, error);
 if (reg < 0)
 return;
 MLX5_ASSERT(reg != REG_NON);
 MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
- info[idx] = (struct field_modify_info){4, 0,
- reg_to_field[reg]};
- if (mask)
- mask[idx] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ if (xmeta == MLX5_XMETA_MODE_META16) {
+ info[idx] = (struct field_modify_info){2, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_16(0xffff >>
+ (16 - width));
+ } else if (xmeta == MLX5_XMETA_MODE_META32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ } else {
+ MLX5_ASSERT(false);
+ }
 }
 break;
 case RTE_FLOW_FIELD_POINTER:
@@ -1799,7 +1833,12 @@ mlx5_flow_field_id_to_modify_info
 val = data->value;
 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
 if (mask[idx]) {
- if (dst_width > 16) {
+ if (dst_width == 48) {
+ /* Special case for MAC addresses. */
+ value[idx] = rte_cpu_to_be_16(val);
+ val >>= 16;
+ dst_width -= 16;
+ } else if (dst_width > 16) {
 value[idx] = rte_cpu_to_be_32(val);
 val >>= 32;
 } else if (dst_width > 8) {
@@ -1845,6 +1884,8 @@ flow_dv_convert_action_modify_field
 const struct rte_flow_attr *attr,
 struct rte_flow_error *error)
 {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
 const struct rte_flow_action_modify_field *conf =
 (const struct rte_flow_action_modify_field *)(action->conf);
 struct rte_flow_item item;
@@ -1855,7 +1896,8 @@ flow_dv_convert_action_modify_field
 uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
 uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
 uint32_t type;
- uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
+ uint32_t dst_width = mlx5_flow_item_field_width(config,
+ conf->dst.field);
 
 if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
 conf->src.field == RTE_FLOW_FIELD_VALUE) {
@@ -2623,6 +2665,51 @@ flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
 "specified range not supported");
 }
 
+/**
+ * Validate ASO CT item.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Pointer to bit-fields that hold the items detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_conntrack *spec = item->spec;
+ const struct rte_flow_item_conntrack *mask = item->mask;
+ uint32_t flags;
+ RTE_SET_USED(dev);
+
+ if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Only one CT is supported");
+ if (!mask)
+ mask = &rte_flow_item_conntrack_mask;
+ flags = spec->flags & mask->flags;
+ if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
+ ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
+ (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
+ (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Conflict status bits");
+ /* State change also needs to be considered. */
+ *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
+ return 0;
+}
+
 /**
  * Validate the pop VLAN action.
  *
@@ -3442,6 +3529,57 @@ flow_dv_validate_action_raw_encap_decap
 return 0;
 }
 
+/**
+ * Validate the ASO CT action.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] item_flags
+ * The items found in this flow rule.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ uint64_t item_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ RTE_SET_USED(dev);
+
+ if (attr->group == 0 && !attr->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Only support non-root table");
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "CT cannot follow a fate action");
+ if ((action_flags & MLX5_FLOW_ACTION_METER) ||
+ (action_flags & MLX5_FLOW_ACTION_AGE))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Only one ASO action is supported");
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Encap cannot exist before CT");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Not an outer TCP packet");
+ return 0;
+}
+
 /**
  * Match encap_decap resource.
* @@ -4614,10 +4752,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev, struct mlx5_dev_config *config = &priv->config; const struct rte_flow_action_modify_field *action_modify_field = action->conf; - uint32_t dst_width = - mlx5_flow_item_field_width(action_modify_field->dst.field); - uint32_t src_width = - mlx5_flow_item_field_width(action_modify_field->src.field); + uint32_t dst_width = mlx5_flow_item_field_width(config, + action_modify_field->dst.field); + uint32_t src_width = mlx5_flow_item_field_width(config, + action_modify_field->src.field); ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); if (ret) @@ -4670,8 +4808,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev, "inner header fields modification" " is not supported"); } - if (action_modify_field->dst.field == - action_modify_field->src.field) + if ((action_modify_field->dst.field == + action_modify_field->src.field) && + (action_modify_field->dst.level == + action_modify_field->src.level)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "source and destination fields" @@ -5990,28 +6130,33 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter) return; cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); MLX5_ASSERT(pool); - /* - * If the counter action is shared by ID, the l3t_clear_entry function - * reduces its references counter. If after the reduction the action is - * still referenced, the function returns here and does not release it. - */ - if (IS_LEGACY_SHARED_CNT(counter) && - mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id)) - return; - /* - * If the counter action is shared by indirect action API, the atomic - * function reduces its references counter. If after the reduction the - * action is still referenced, the function returns here and does not - * release it. - * When the counter action is not shared neither by ID nor by indirect - * action API, shared info is 1 before the reduction, so this condition - * is failed and function doesn't return here. - */ - if (!IS_LEGACY_SHARED_CNT(counter) && - __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED)) - return; - if (pool->is_aged) + if (pool->is_aged) { flow_dv_counter_remove_from_age(dev, counter, cnt); + } else { + /* + * If the counter action is shared by ID, the l3t_clear_entry + * function reduces its references counter. If after the + * reduction the action is still referenced, the function + * returns here and does not release it. + */ + if (IS_LEGACY_SHARED_CNT(counter) && + mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, + cnt->shared_info.id)) + return; + /* + * If the counter action is shared by indirect action API, + * the atomic function reduces its references counter. + * If after the reduction the action is still referenced, the + * function returns here and does not release it. + * When the counter action is not shared neither by ID nor by + * indirect action API, shared info is 1 before the reduction, + * so this condition is failed and function doesn't return here. + */ + if (!IS_LEGACY_SHARED_CNT(counter) && + __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, + __ATOMIC_RELAXED)) + return; + } cnt->pool = pool; /* * Put the counter back to list to be updated in none fallback mode. 
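
The reworked flow_dv_counter_free() above keeps the usual release-on-last-reference idiom: __atomic_sub_fetch() returns the post-decrement value, so a non-zero result means another holder remains and the counter must not be returned to the pool. A minimal stand-alone sketch of that pattern, using a hypothetical cnt_ref type rather than the driver's own struct:

#include <stdbool.h>
#include <stdint.h>

struct cnt_ref {
	uint32_t refcnt; /* one reference per flow sharing the counter */
};

/* Drop one reference; true means the caller dropped the last one and
 * now owns the release of the underlying resource.
 */
static bool
cnt_ref_put(struct cnt_ref *c)
{
	/* __atomic_sub_fetch() returns the value after the decrement,
	 * so zero means no flow references the counter anymore.
	 */
	return __atomic_sub_fetch(&c->refcnt, 1, __ATOMIC_RELAXED) == 0;
}

The same decrement-and-test shape appears again below in flow_dv_aso_ct_dev_release(), with an extra state check layered in front of it.
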
@@ -6531,10 +6676,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 uint32_t rw_act_num = 0;
 uint64_t is_root;
 const struct mlx5_flow_tunnel *tunnel;
+ enum mlx5_tof_rule_type tof_rule_type;
 struct flow_grp_info grp_info = {
 .external = !!external,
 .transfer = !!attr->transfer,
 .fdb_def_rule = !!priv->fdb_def_rule,
+ .std_tbl_fix = true,
 };
 const struct rte_eth_hairpin_conf *conf;
 const struct rte_flow_item *rule_items = items;
@@ -6542,23 +6689,22 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 
 if (items == NULL)
 return -1;
- if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
- tunnel = flow_items_to_tunnel(items);
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
- MLX5_FLOW_ACTION_DECAP;
- } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
- tunnel = flow_actions_to_tunnel(actions);
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
- } else {
- tunnel = NULL;
+ tunnel = is_tunnel_offload_active(dev) ?
+ mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
+ if (tunnel) {
+ if (priv->representor)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "decap not supported for VF representor");
+ if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
+ MLX5_FLOW_ACTION_DECAP;
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, attr, tunnel, tof_rule_type);
 }
- if (tunnel && priv->representor)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "decap not supported "
- "for VF representor");
- grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
- (dev, tunnel, attr, items, actions);
 ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
 if (ret < 0)
 return ret;
@@ -6572,15 +6718,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 RTE_FLOW_ERROR_TYPE_ITEM,
 NULL, "item not supported");
 switch (type) {
- case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
- if (items[0].type != (typeof(items[0].type))
- MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL, "MLX5 private items "
- "must be the first");
- break;
 case RTE_FLOW_ITEM_TYPE_VOID:
 break;
 case RTE_FLOW_ITEM_TYPE_PORT_ID:
@@ -6873,6 +7010,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 return ret;
 last_item = MLX5_FLOW_ITEM_INTEGRITY;
 break;
+ case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+ ret = flow_dv_validate_item_aso_ct(dev, items,
+ &item_flags, error);
+ if (ret < 0)
+ return ret;
+ break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
+ /* Tunnel offload item was processed before;
+ * list it here as a supported type.
+ */
+ break;
 default:
 return rte_flow_error_set(error, ENOTSUP,
 RTE_FLOW_ERROR_TYPE_ITEM,
@@ -7414,17 +7562,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 action_flags |= MLX5_FLOW_ACTION_SAMPLE;
 ++actions_n;
 break;
- case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
- if (actions[0].type != (typeof(actions[0].type))
- MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "MLX5 private action "
- "must be the first");
-
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
- break;
 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
 ret = flow_dv_validate_action_modify_field(dev,
 action_flags,
@@ -7441,6 +7578,19 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
 rw_act_num += ret;
 break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ ret = flow_dv_validate_action_aso_ct(dev, action_flags,
+ item_flags, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_CT;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ /* Tunnel offload action was processed before;
+ * list it here as a supported type.
+ */
+ break;
 default:
 return rte_flow_error_set(error, ENOTSUP,
 RTE_FLOW_ERROR_TYPE_ACTION,
@@ -9094,27 +9244,14 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev,
 if (reg < 0)
 return;
 MLX5_ASSERT(reg != REG_NON);
- /*
- * In datapath code there is no endianness
- * coversions for perfromance reasons, all
- * pattern conversions are done in rte_flow.
- */
- value = rte_cpu_to_be_32(value);
- mask = rte_cpu_to_be_32(mask);
 if (reg == REG_C_0) {
 struct mlx5_priv *priv = dev->data->dev_private;
 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
 uint32_t shl_c0 = rte_bsf32(msk_c0);
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
- uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
 
- value >>= shr_c0;
- mask >>= shr_c0;
-#endif
- value <<= shl_c0;
+ mask &= msk_c0;
 mask <<= shl_c0;
- MLX5_ASSERT(msk_c0);
- MLX5_ASSERT(!(~msk_c0 & mask));
+ value <<= shl_c0;
 }
 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
 }
@@ -9615,6 +9752,64 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
 }
 }
 
+/**
+ * Add connection tracking status item to matcher.
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ uint32_t reg_value = 0;
+ int reg_id;
+ /* 8LSB 0b 11/0000/11, middle 4 bits are reserved. */
+ uint32_t reg_mask = 0;
+ const struct rte_flow_item_conntrack *spec = item->spec;
+ const struct rte_flow_item_conntrack *mask = item->mask;
+ uint32_t flags;
+ struct rte_flow_error error;
+
+ if (!mask)
+ mask = &rte_flow_item_conntrack_mask;
+ if (!spec || !mask->flags)
+ return;
+ flags = spec->flags & mask->flags;
+ /* The conflict should be checked in the validation. */
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
+ reg_value |= MLX5_CT_SYNDROME_VALID;
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
+ reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
+ reg_value |= MLX5_CT_SYNDROME_INVALID;
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
+ reg_value |= MLX5_CT_SYNDROME_TRAP;
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
+ reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
+ if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
+ RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
+ RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
+ reg_mask |= 0xc0;
+ if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
+ reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
+ if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
+ reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
+ /* The REG_C_x value could be saved during startup. */
+ reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
+ if (reg_id == REG_NON)
+ return;
+ flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
+ reg_value, reg_mask);
+}
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers) \
@@ -11324,38 +11519,35 @@ flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
 }
 
 /**
- * Create a age action using ASO mechanism.
+ * Initialize flow ASO age parameters.
 *
 * @param[in] dev
 * Pointer to rte_eth_dev structure.
- * @param[in] age
- * Pointer to the aging action configuration.
- * @param[out] error
- * Pointer to the error structure.
+ * @param[in] age_idx
+ * Index of ASO age action.
+ * @param[in] context
+ * Pointer to flow counter age context.
+ * @param[in] timeout
+ * Aging timeout in seconds.
 *
- * @return
- * Index to flow counter on success, 0 otherwise.
 */
-static uint32_t
-flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
- const struct rte_flow_action_age *age,
- struct rte_flow_error *error)
+static void
+flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
+ uint32_t age_idx,
+ void *context,
+ uint32_t timeout)
 {
- uint32_t age_idx = 0;
 struct mlx5_aso_age_action *aso_age;
 
- age_idx = flow_dv_aso_age_alloc(dev, error);
- if (!age_idx)
- return 0;
 aso_age = flow_aso_age_get_by_idx(dev, age_idx);
- aso_age->age_params.context = age->context;
- aso_age->age_params.timeout = age->timeout;
+ MLX5_ASSERT(aso_age);
+ aso_age->age_params.context = context;
+ aso_age->age_params.timeout = timeout;
 aso_age->age_params.port_id = dev->data->port_id;
 __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
 __ATOMIC_RELAXED);
 __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
 __ATOMIC_RELAXED);
- return age_idx;
 }
 
 static void
@@ -11515,7 +11707,7 @@ flow_dv_prepare_counter(struct rte_eth_dev *dev,
 }
 
 /*
- * Release an ASO CT action.
+ * Release an ASO CT action by its own device.
 *
 * @param[in] dev
 * Pointer to the Ethernet device structure.
@@ -11526,13 +11718,19 @@ flow_dv_prepare_counter(struct rte_eth_dev *dev,
 * 0 when CT action was removed, otherwise the number of references.
 */
 static inline int
-flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t idx)
+flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
 {
 struct mlx5_priv *priv = dev->data->dev_private;
 struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
- struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_idx(dev, idx);
- uint32_t ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ uint32_t ret;
+ struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
+ enum mlx5_aso_ct_state state =
+ __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
 
+ /* Cannot release when CT is in the ASO SQ. */
+ if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
+ return -1;
+ ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
 if (!ret) {
 if (ct->dr_action_orig) {
 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -11548,11 +11746,27 @@ flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t idx)
 #endif
 ct->dr_action_rply = NULL;
 }
+ /* Clear the state to free, no need in 1st allocation. */
+ MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
 rte_spinlock_lock(&mng->ct_sl);
 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
 rte_spinlock_unlock(&mng->ct_sl);
 }
- return ret;
+ return (int)ret;
+}
+
+static inline int
+flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
+{
+ uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
+ uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
+ struct rte_eth_dev *owndev = &rte_eth_devices[owner];
+ RTE_SET_USED(dev);
+
+ MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
+ if (dev->data->dev_started != 1)
+ return -1;
+ return flow_dv_aso_ct_dev_release(owndev, idx);
 }
 
 /*
@@ -11704,7 +11918,7 @@ flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
 RTE_SET_USED(reg_c);
 #endif
 if (!ct->dr_action_orig) {
- flow_dv_aso_ct_release(dev, ct_idx);
+ flow_dv_aso_ct_dev_release(dev, ct_idx);
 rte_flow_error_set(error, rte_errno,
 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 "failed to create ASO CT action");
@@ -11720,7 +11934,7 @@ flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
 reg_c - REG_C_0);
 #endif
 if (!ct->dr_action_rply) {
- flow_dv_aso_ct_release(dev, ct_idx);
+ flow_dv_aso_ct_dev_release(dev, ct_idx);
 rte_flow_error_set(error, rte_errno,
 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 "failed to create ASO CT action");
@@ -11762,11 +11976,13 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
 return rte_flow_error_set(error, rte_errno,
 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 "Failed to allocate CT object");
- ct = flow_aso_ct_get_by_idx(dev, idx);
+ ct = flow_aso_ct_get_by_dev_idx(dev, idx);
 if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
 return rte_flow_error_set(error, EBUSY,
 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 "Failed to update CT");
+ ct->is_original = !!pro->is_original_dir;
+ ct->peer = pro->peer_port;
 return idx;
 }
 
@@ -11843,13 +12059,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
 int tmp_actions_n = 0;
 uint32_t table;
 int ret = 0;
- const struct mlx5_flow_tunnel *tunnel;
+ const struct mlx5_flow_tunnel *tunnel = NULL;
 struct flow_grp_info grp_info = {
 .external = !!dev_flow->external,
 .transfer = !!attr->transfer,
 .fdb_def_rule = !!priv->fdb_def_rule,
 .skip_scale = dev_flow->skip_scale &
 (1 << MLX5_SCALE_FLOW_GROUP_BIT),
+ .std_tbl_fix = true,
 };
 const struct rte_flow_item *head_item = items;
 
@@ -11865,15 +12082,21 @@ flow_dv_translate(struct rte_eth_dev *dev,
 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
 /* update normal path action resource into last index of array */
 sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
- tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
- flow_items_to_tunnel(items) :
- is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
- flow_actions_to_tunnel(actions) :
- dev_flow->tunnel ? dev_flow->tunnel : NULL;
+ if (is_tunnel_offload_active(dev)) {
+ if (dev_flow->tunnel) {
+ RTE_VERIFY(dev_flow->tof_type ==
+ MLX5_TUNNEL_OFFLOAD_MISS_RULE);
+ tunnel = dev_flow->tunnel;
+ } else {
+ tunnel = mlx5_get_tof(items, actions,
+ &dev_flow->tof_type);
+ dev_flow->tunnel = tunnel;
+ }
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, attr, tunnel, dev_flow->tof_type);
+ }
 mhdr_res->ft_type = attr->egress ?
 MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
- grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
- (dev, tunnel, attr, items, actions);
 ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
 &grp_info, error);
 if (ret)
@@ -11883,7 +12106,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
 /* number of actions must be set to 0 in case of dirty stack. */
 mhdr_res->actions_num = 0;
- if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+ if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
 /*
 * do not add decap action if match rule drops packet
 * HW rejects rules with decap & drop
@@ -11925,6 +12148,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 int action_type = actions->type;
 const struct rte_flow_action *found_action = NULL;
 uint32_t jump_group = 0;
+ uint32_t owner_idx;
+ struct mlx5_aso_ct_action *ct;
 
 if (!mlx5_flow_os_action_supported(action_type))
 return rte_flow_error_set(error, ENOTSUP,
@@ -12378,6 +12603,31 @@ flow_dv_translate(struct rte_eth_dev *dev,
 return -rte_errno;
 action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
 break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ owner_idx = (uint32_t)(uintptr_t)action->conf;
+ ct = flow_aso_ct_get_by_idx(dev, owner_idx);
+ if (!ct)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Failed to get CT object.");
+ if (mlx5_aso_ct_available(priv->sh, ct))
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "CT is unavailable.");
+ if (ct->is_original)
+ dev_flow->dv.actions[actions_n] =
+ ct->dr_action_orig;
+ else
+ dev_flow->dv.actions[actions_n] =
+ ct->dr_action_rply;
+ flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
+ flow->ct = owner_idx;
+ __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_CT;
+ break;
 case RTE_FLOW_ACTION_TYPE_END:
 actions_end = true;
 if (mhdr_res->actions_num) {
@@ -12396,7 +12646,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 if ((non_shared_age &&
 count && !count->shared) ||
 !(priv->sh->flow_hit_aso_en &&
- attr->group)) {
+ (attr->group || attr->transfer))) {
 /* Creates age by counters. */
 cnt_act = flow_dv_prepare_counter
 (dev, dev_flow,
@@ -12410,17 +12660,17 @@ flow_dv_translate(struct rte_eth_dev *dev,
 break;
 }
 if (!flow->age && non_shared_age) {
- flow->age =
- flow_dv_translate_create_aso_age
- (dev,
- non_shared_age,
- error);
+ flow->age = flow_dv_aso_age_alloc
+ (dev, error);
 if (!flow->age)
- return rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "can't create ASO age action");
+ return -rte_errno;
+ flow_dv_aso_age_params_init
+ (dev, flow->age,
+ non_shared_age->context ?
+ non_shared_age->context :
+ (void *)(uintptr_t)
+ (dev_flow->flow_idx),
+ non_shared_age->timeout);
 }
 age_act = flow_aso_age_get_by_idx(dev,
 flow->age);
@@ -12695,6 +12945,10 @@ flow_dv_translate(struct rte_eth_dev *dev,
 match_value, head_item, items);
 break;
+ case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+ flow_dv_translate_item_aso_ct(dev, match_mask,
+ match_value, items);
+ break;
 default:
 break;
 }
@@ -13548,7 +13802,10 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 mlx5_flow_meter_detach(priv, fm);
 flow->meter = 0;
 }
- if (flow->age)
+ /* Keep the current age handling by default. */
+ if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
+ flow_dv_aso_ct_release(dev, flow->ct);
+ else if (flow->age)
 flow_dv_aso_age_release(dev, flow->age);
 if (flow->geneve_tlv_option) {
 flow_dv_geneve_tlv_option_resource_release(dev);
@@ -13975,6 +14232,8 @@ flow_dv_action_create(struct rte_eth_dev *dev,
 const struct rte_flow_action *action,
 struct rte_flow_error *err)
 {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t age_idx = 0;
 uint32_t idx = 0;
 uint32_t ret = 0;
 
@@ -13985,17 +14244,22 @@ flow_dv_action_create(struct rte_eth_dev *dev,
 MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
 break;
 case RTE_FLOW_ACTION_TYPE_AGE:
- ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
- idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
- MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
- if (ret) {
- struct mlx5_aso_age_action *aso_age =
- flow_aso_age_get_by_idx(dev, ret);
-
- if (!aso_age->age_params.context)
- aso_age->age_params.context =
- (void *)(uintptr_t)idx;
+ age_idx = flow_dv_aso_age_alloc(dev, err);
+ if (!age_idx) {
+ ret = -rte_errno;
+ break;
 }
+ idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
+ MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
+ flow_dv_aso_age_params_init(dev, age_idx,
+ ((const struct rte_flow_action_age *)
+ action->conf)->context ?
+ ((const struct rte_flow_action_age *)
+ action->conf)->context :
+ (void *)(uintptr_t)idx,
+ ((const struct rte_flow_action_age *)
+ action->conf)->timeout);
+ ret = age_idx;
 break;
 case RTE_FLOW_ACTION_TYPE_COUNT:
 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
@@ -14005,8 +14269,7 @@ flow_dv_action_create(struct rte_eth_dev *dev,
 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
 ret = flow_dv_translate_create_conntrack(dev, action->conf,
 err);
- idx = (MLX5_INDIRECT_ACTION_TYPE_CT <<
- MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
+ idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
 break;
 default:
 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
@@ -14070,6 +14333,14 @@ flow_dv_action_destroy(struct rte_eth_dev *dev,
 DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
 " released with references %d.", idx, ret);
 return 0;
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ ret = flow_dv_aso_ct_release(dev, idx);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ DRV_LOG(DEBUG, "Connection tracking object %u still "
+ "has references %d.", idx, ret);
+ return 0;
 default:
 return rte_flow_error_set(error, ENOTSUP,
 RTE_FLOW_ERROR_TYPE_ACTION,
@@ -14144,6 +14415,72 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
 return ret;
 }
 
+/**
+ * Updates the conntrack context or direction in place.
+ * Context update should be synchronized.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * The conntrack object ID to be updated.
+ * @param[in] update
+ * Pointer to the structure of information to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
+ const struct rte_flow_modify_conntrack *update,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_action *ct;
+ const struct rte_flow_action_conntrack *new_prf;
+ int ret = 0;
+ uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
+ uint32_t dev_idx;
+
+ if (PORT_ID(priv) != owner)
+ return rte_flow_error_set(error, EACCES,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "CT object owned by another port");
+ dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
+ ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
+ if (!ct->refcnt)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "CT object is inactive");
+ new_prf = &update->new_ct;
+ if (update->direction)
+ ct->is_original = !!new_prf->is_original_dir;
+ if (update->state) {
+ /* Only validate the profile when it needs to be updated. */
+ ret = mlx5_validate_action_ct(dev, new_prf, error);
+ if (ret)
+ return ret;
+ ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
+ if (ret)
+ return rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to send CT context update WQE");
+ /* Block until ready or a failure. */
+ ret = mlx5_aso_ct_available(priv->sh, ct);
+ if (ret)
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Timeout to get the CT update");
+ }
+ return ret;
+}
+
 /**
 * Updates in place shared action configuration, lock free,
 * (mutex should be acquired by caller).
@@ -14179,6 +14516,8 @@ flow_dv_action_update(struct rte_eth_dev *dev,
 case MLX5_INDIRECT_ACTION_TYPE_RSS:
 action_conf = ((const struct rte_flow_action *)update)->conf;
 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ return __flow_dv_action_ct_update(dev, idx, update, err);
 default:
 return rte_flow_error_set(err, ENOTSUP,
 RTE_FLOW_ERROR_TYPE_ACTION,
@@ -14404,12 +14743,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 MLX5_ASSERT(dev_flow.dv.tag_resource);
 act_cnt->rix_mark =
 dev_flow.handle->dvh.rix_tag;
- if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
- dev_flow.handle->rix_hrxq =
- mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
 action_flags |= MLX5_FLOW_ACTION_MARK;
 break;
 }
@@ -14457,12 +14790,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 "set tag action");
 act_cnt->modify_hdr =
 dev_flow.handle->dvh.modify_hdr;
- if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
- dev_flow.handle->rix_hrxq =
- mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
 break;
 }
@@ -14506,41 +14833,20 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 }
 case RTE_FLOW_ACTION_TYPE_QUEUE:
 {
- struct mlx5_hrxq *hrxq;
- uint32_t hrxq_idx;
- struct mlx5_flow_rss_desc rss_desc;
- struct mlx5_flow_meter_sub_policy *sub_policy =
- mtr_policy->sub_policys[domain][0];
-
 if (i >= MLX5_MTR_RTE_COLORS)
 return -rte_mtr_error_set(error,
 ENOTSUP,
 RTE_MTR_ERROR_TYPE_METER_POLICY,
 NULL, "cannot create policy "
 "fate queue for this color");
- memset(&rss_desc, 0,
- sizeof(struct mlx5_flow_rss_desc));
- rss_desc.queue_num = 1;
- rss_desc.const_q = act->conf;
- hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
- &rss_desc, &hrxq_idx);
- if (!hrxq)
- return -rte_mtr_error_set(error,
- ENOTSUP,
- RTE_MTR_ERROR_TYPE_METER_POLICY,
- NULL,
- "cannot create policy fate queue");
- sub_policy->rix_hrxq[i] = hrxq_idx;
+ act_cnt->queue =
+ ((const struct rte_flow_action_queue *)
+ (act->conf))->index;
 act_cnt->fate_action = MLX5_FLOW_FATE_QUEUE;
 dev_flow.handle->fate_action = MLX5_FLOW_FATE_QUEUE;
- if (action_flags & MLX5_FLOW_ACTION_MARK ||
- action_flags & MLX5_FLOW_ACTION_SET_TAG) {
- dev_flow.handle->rix_hrxq = hrxq_idx;
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
+ mtr_policy->is_queue = 1;
 action_flags |= MLX5_FLOW_ACTION_QUEUE;
 break;
 }
@@ -14800,6 +15106,10 @@ flow_dv_action_query(struct rte_eth_dev *dev,
 uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_action *ct;
+ uint16_t owner;
+ uint32_t dev_idx;
 
 switch (type) {
 case MLX5_INDIRECT_ACTION_TYPE_AGE:
@@ -14815,6 +15125,31 @@ flow_dv_action_query(struct rte_eth_dev *dev,
 return 0;
 case MLX5_INDIRECT_ACTION_TYPE_COUNT:
 return flow_dv_query_count(dev, idx, data, error);
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
+ if (owner != PORT_ID(priv))
+ return rte_flow_error_set(error, EACCES,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "CT object owned by another port");
+ dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
+ ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
+ MLX5_ASSERT(ct);
+ if (!ct->refcnt)
+ return rte_flow_error_set(error, EFAULT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "CT object is inactive");
+ ((struct rte_flow_action_conntrack *)data)->peer_port =
+ ct->peer;
+ ((struct rte_flow_action_conntrack *)data)->is_original_dir =
+ ct->is_original;
+ if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
+ return rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to query CT context");
+ return 0;
 default:
 return rte_flow_error_set(error, ENOTSUP,
 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -15725,6 +16060,73 @@ rss_sub_policy_error:
 return NULL;
 }
 
+
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ */
+static void
+flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+ uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+ uint32_t i, j;
+ uint16_t sub_policy_num, new_policy_num;
+
+ rte_spinlock_lock(&mtr_policy->sl);
+ for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ switch (mtr_policy->act_cnt[i].fate_action) {
+ case MLX5_FLOW_FATE_SHARED_RSS:
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ new_policy_num = sub_policy_num;
+ for (j = 0; j < sub_policy_num; j++) {
+ sub_policy =
+ mtr_policy->sub_policys[domain][j];
+ if (sub_policy) {
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ if (sub_policy !=
+ mtr_policy->sub_policys[domain][0]) {
+ mtr_policy->sub_policys[domain][j] =
+ NULL;
+ mlx5_ipool_free
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ sub_policy->idx);
+ new_policy_num--;
+ }
+ }
+ }
+ if (new_policy_num != sub_policy_num) {
+ mtr_policy->sub_policy_num &=
+ ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+ mtr_policy->sub_policy_num |=
+ (new_policy_num &
+ MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+ }
+ break;
+ case MLX5_FLOW_FATE_QUEUE:
+ sub_policy = mtr_policy->sub_policys[domain][0];
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ break;
+ default:
+ /* Other actions without a queue; nothing to do. */
+ break;
+ }
+ }
+ rte_spinlock_unlock(&mtr_policy->sl);
+}
+
 /**
 * Validate the batch counter support in root table.
 *
@@ -15749,7 +16151,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
 .size = sizeof(value.buf),
 };
 struct mlx5dv_flow_matcher_attr dv_attr = {
- .type = IBV_FLOW_ATTR_NORMAL,
+ .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
 .priority = 0,
 .match_criteria_enable = 0,
 .match_mask = (void *)&mask,
@@ -15761,7 +16163,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
 void *flow = NULL;
 int ret = -1;
 
- tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
+ tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
 0, 0, 0, NULL);
 if (!tbl)
 goto err;
@@ -15772,14 +16174,12 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
 &actions[0]);
 if (ret)
 goto err;
- actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
- priv->drop_queue.hrxq->action;
 dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
 &matcher);
 if (ret)
 goto err;
- ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
+ ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
 actions, &flow);
 err:
 /*
@@ -15990,6 +16390,12 @@ flow_dv_action_validate(struct rte_eth_dev *dev,
 NULL,
 "Mix shared and indirect counter is not supported");
 return flow_dv_validate_action_count(dev, true, 0, err);
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ if (!priv->sh->ct_aso_en)
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "ASO CT is not supported");
+ return mlx5_validate_action_ct(dev, action->conf, err);
 default:
 return rte_flow_error_set(err, ENOTSUP,
 RTE_FLOW_ERROR_TYPE_ACTION,
@@ -16330,6 +16736,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 .create_def_policy = flow_dv_create_def_policy,
 .destroy_def_policy = flow_dv_destroy_def_policy,
 .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
+ .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
 .counter_alloc = flow_dv_counter_allocate,
 .counter_free = flow_dv_counter_free,
 .counter_query = flow_dv_counter_query,
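
The cross-port conntrack plumbing in this patch (flow_dv_aso_ct_release(), __flow_dv_action_ct_update(), the CT branch of flow_dv_action_query()) works because the indirect-action handle encodes both the owning port and the per-port object index through the MLX5_INDIRECT_ACT_CT_GEN_IDX / _GET_OWNER / _GET_IDX macros. A sketch of such a packing scheme — the shift width and macro names here are illustrative assumptions, not the driver's exact layout in mlx5.h:

#include <stdint.h>

/* Illustrative split: low bits index the CT object inside the owner
 * port's pools, the bits above hold the owner port id.
 */
#define CT_OWNER_SHIFT 22
#define CT_IDX_MASK ((UINT32_C(1) << CT_OWNER_SHIFT) - 1)

/* Build a handle from the owner port id and the per-port object index. */
static inline uint32_t
ct_gen_idx(uint16_t owner_port, uint32_t idx)
{
	return ((uint32_t)owner_port << CT_OWNER_SHIFT) |
	       (idx & CT_IDX_MASK);
}

/* Recover the owner port, e.g. to reject updates from another port. */
static inline uint16_t
ct_get_owner(uint32_t handle)
{
	return (uint16_t)(handle >> CT_OWNER_SHIFT);
}

/* Recover the per-port index used to look the object up in the pools. */
static inline uint32_t
ct_get_idx(uint32_t handle)
{
	return handle & CT_IDX_MASK;
}

With this split, any port can resolve a handle created by another port: recover the owner, fetch that port's rte_eth_dev, then look the index up in the owner's CT pools — the pattern flow_dv_aso_ct_release() follows above.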