X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=62edc4fa850746a83c371537f95f4feec264deec;hb=3186a3a49c3a33502ba6189a80b8317c0a064830;hp=f4fddd56a51bdc69dd4fb134d7f190890470f340;hpb=aca19061e4b961c1d31dedb690a8171c6e150dad;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index f4fddd56a5..62edc4fa85 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -426,6 +426,8 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, unsigned int off_b; uint32_t mask; uint32_t data; + bool next_field = true; + bool next_dcopy = true; if (i >= MLX5_MAX_MODIFY_NUM) return rte_flow_error_set(error, EINVAL, @@ -443,15 +445,13 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, size_b = sizeof(uint32_t) * CHAR_BIT - off_b - __builtin_clz(mask); MLX5_ASSERT(size_b); - size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b; actions[i] = (struct mlx5_modification_cmd) { .action_type = type, .field = field->id, .offset = off_b, - .length = size_b, + .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ? + 0 : size_b, }; - /* Convert entire record to expected big-endian format. */ - actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); if (type == MLX5_MODIFICATION_TYPE_COPY) { MLX5_ASSERT(dcopy); actions[i].dst_field = dcopy->id; @@ -459,7 +459,27 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, (int)dcopy->offset < 0 ? off_b : dcopy->offset; /* Convert entire record to big-endian format. */ actions[i].data1 = rte_cpu_to_be_32(actions[i].data1); - ++dcopy; + /* + * Destination field overflow. Copy leftovers of + * a source field to the next destination field. + */ + if ((size_b > dcopy->size * CHAR_BIT) && dcopy->size) { + actions[i].length = dcopy->size * CHAR_BIT; + field->offset += dcopy->size; + next_field = false; + } + /* + * Not enough bits in a source filed to fill a + * destination field. Switch to the next source. + */ + if (dcopy->size > field->size && + (size_b == field->size * CHAR_BIT)) { + actions[i].length = field->size * CHAR_BIT; + dcopy->offset += field->size * CHAR_BIT; + next_dcopy = false; + } + if (next_dcopy) + ++dcopy; } else { MLX5_ASSERT(item->spec); data = flow_dv_fetch_field((const uint8_t *)item->spec + @@ -468,8 +488,11 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, data = (data & mask) >> off_b; actions[i].data1 = rte_cpu_to_be_32(data); } + /* Convert entire record to expected big-endian format. */ + actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); + if (next_field) + ++field; ++i; - ++field; } while (field->size); if (resource->actions_num == i) return rte_flow_error_set(error, EINVAL, @@ -1239,8 +1262,8 @@ flow_dv_convert_action_set_meta const struct rte_flow_action_set_meta *conf, struct rte_flow_error *error) { - uint32_t data = conf->data; - uint32_t mask = conf->mask; + uint32_t mask = rte_cpu_to_be_32(conf->mask); + uint32_t data = rte_cpu_to_be_32(conf->data) & mask; struct rte_flow_item item = { .spec = &data, .mask = &mask, @@ -1253,25 +1276,14 @@ flow_dv_convert_action_set_meta if (reg < 0) return reg; MLX5_ASSERT(reg != REG_NON); - /* - * In datapath code there is no endianness - * coversions for perfromance reasons, all - * pattern conversions are done in rte_flow. 
- */ if (reg == REG_C_0) { struct mlx5_priv *priv = dev->data->dev_private; uint32_t msk_c0 = priv->sh->dv_regc0_mask; - uint32_t shl_c0; + uint32_t shl_c0 = rte_bsf32(msk_c0); - MLX5_ASSERT(msk_c0); -#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN - shl_c0 = rte_bsf32(msk_c0); -#else - shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0); -#endif - mask <<= shl_c0; - data <<= shl_c0; - MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask))); + data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0); + mask = rte_cpu_to_be_32(mask) & msk_c0; + mask = rte_cpu_to_be_32(mask << shl_c0); } reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]}; /* The routine expects parameters in memory as big-endian ones. */ @@ -1356,7 +1368,8 @@ flow_dv_convert_action_modify_ipv6_dscp } static int -mlx5_flow_item_field_width(enum rte_flow_field_id field) +mlx5_flow_item_field_width(struct mlx5_dev_config *config, + enum rte_flow_field_id field) { switch (field) { case RTE_FLOW_FIELD_START: @@ -1404,7 +1417,12 @@ mlx5_flow_item_field_width(enum rte_flow_field_id field) case RTE_FLOW_FIELD_MARK: return 24; case RTE_FLOW_FIELD_META: - return 32; + if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16) + return 16; + else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32) + return 32; + else + return 0; case RTE_FLOW_FIELD_POINTER: case RTE_FLOW_FIELD_VALUE: return 64; @@ -1424,7 +1442,10 @@ mlx5_flow_field_id_to_modify_info const struct rte_flow_attr *attr, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; uint32_t idx = 0; + uint32_t off = 0; uint64_t val = 0; switch (data->field) { case RTE_FLOW_FIELD_START: @@ -1432,61 +1453,63 @@ mlx5_flow_field_id_to_modify_info MLX5_ASSERT(false); break; case RTE_FLOW_FIELD_MAC_DST: + off = data->offset > 16 ? data->offset - 16 : 0; if (mask) { - if (data->offset < 32) { - info[idx] = (struct field_modify_info){4, 0, - MLX5_MODI_OUT_DMAC_47_16}; - if (width < 32) { - mask[idx] = - rte_cpu_to_be_32(0xffffffff >> - (32 - width)); + if (data->offset < 16) { + info[idx] = (struct field_modify_info){2, 0, + MLX5_MODI_OUT_DMAC_15_0}; + if (width < 16) { + mask[idx] = rte_cpu_to_be_16(0xffff >> + (16 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); - width -= 32; + mask[idx] = RTE_BE16(0xffff); + width -= 16; } if (!width) break; ++idx; } - info[idx] = (struct field_modify_info){2, 4 * idx, - MLX5_MODI_OUT_DMAC_15_0}; - mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width)); - } else { - if (data->offset < 32) - info[idx++] = (struct field_modify_info){4, 0, + info[idx] = (struct field_modify_info){4, 4 * idx, MLX5_MODI_OUT_DMAC_47_16}; - info[idx] = (struct field_modify_info){2, 0, + mask[idx] = rte_cpu_to_be_32((0xffffffff >> + (32 - width)) << off); + } else { + if (data->offset < 16) + info[idx++] = (struct field_modify_info){2, 0, MLX5_MODI_OUT_DMAC_15_0}; + info[idx] = (struct field_modify_info){4, off, + MLX5_MODI_OUT_DMAC_47_16}; } break; case RTE_FLOW_FIELD_MAC_SRC: + off = data->offset > 16 ? 
data->offset - 16 : 0; if (mask) { - if (data->offset < 32) { - info[idx] = (struct field_modify_info){4, 0, - MLX5_MODI_OUT_SMAC_47_16}; - if (width < 32) { - mask[idx] = - rte_cpu_to_be_32(0xffffffff >> - (32 - width)); + if (data->offset < 16) { + info[idx] = (struct field_modify_info){2, 0, + MLX5_MODI_OUT_SMAC_15_0}; + if (width < 16) { + mask[idx] = rte_cpu_to_be_16(0xffff >> + (16 - width)); width = 0; } else { - mask[idx] = RTE_BE32(0xffffffff); - width -= 32; + mask[idx] = RTE_BE16(0xffff); + width -= 16; } if (!width) break; ++idx; } - info[idx] = (struct field_modify_info){2, 4 * idx, - MLX5_MODI_OUT_SMAC_15_0}; - mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width)); - } else { - if (data->offset < 32) - info[idx++] = (struct field_modify_info){4, 0, + info[idx] = (struct field_modify_info){4, 4 * idx, MLX5_MODI_OUT_SMAC_47_16}; - info[idx] = (struct field_modify_info){2, 0, + mask[idx] = rte_cpu_to_be_32((0xffffffff >> + (32 - width)) << off); + } else { + if (data->offset < 16) + info[idx++] = (struct field_modify_info){2, 0, MLX5_MODI_OUT_SMAC_15_0}; + info[idx] = (struct field_modify_info){4, off, + MLX5_MODI_OUT_SMAC_47_16}; } break; case RTE_FLOW_FIELD_VLAN_TYPE: @@ -1777,17 +1800,28 @@ mlx5_flow_field_id_to_modify_info break; case RTE_FLOW_FIELD_META: { + unsigned int xmeta = config->dv_xmeta_en; int reg = flow_dv_get_metadata_reg(dev, attr, error); if (reg < 0) return; MLX5_ASSERT(reg != REG_NON); MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field)); - info[idx] = (struct field_modify_info){4, 0, - reg_to_field[reg]}; - if (mask) - mask[idx] = - rte_cpu_to_be_32(0xffffffff >> - (32 - width)); + if (xmeta == MLX5_XMETA_MODE_META16) { + info[idx] = (struct field_modify_info){2, 0, + reg_to_field[reg]}; + if (mask) + mask[idx] = rte_cpu_to_be_16(0xffff >> + (16 - width)); + } else if (xmeta == MLX5_XMETA_MODE_META32) { + info[idx] = (struct field_modify_info){4, 0, + reg_to_field[reg]}; + if (mask) + mask[idx] = + rte_cpu_to_be_32(0xffffffff >> + (32 - width)); + } else { + MLX5_ASSERT(false); + } } break; case RTE_FLOW_FIELD_POINTER: @@ -1799,7 +1833,12 @@ mlx5_flow_field_id_to_modify_info val = data->value; for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) { if (mask[idx]) { - if (dst_width > 16) { + if (dst_width == 48) { + /*special case for MAC addresses */ + value[idx] = rte_cpu_to_be_16(val); + val >>= 16; + dst_width -= 16; + } else if (dst_width > 16) { value[idx] = rte_cpu_to_be_32(val); val >>= 32; } else if (dst_width > 8) { @@ -1845,6 +1884,8 @@ flow_dv_convert_action_modify_field const struct rte_flow_attr *attr, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; const struct rte_flow_action_modify_field *conf = (const struct rte_flow_action_modify_field *)(action->conf); struct rte_flow_item item; @@ -1855,7 +1896,8 @@ flow_dv_convert_action_modify_field uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0}; uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0}; uint32_t type; - uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field); + uint32_t dst_width = mlx5_flow_item_field_width(config, + conf->dst.field); if (conf->src.field == RTE_FLOW_FIELD_POINTER || conf->src.field == RTE_FLOW_FIELD_VALUE) { @@ -4710,10 +4752,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev, struct mlx5_dev_config *config = &priv->config; const struct rte_flow_action_modify_field *action_modify_field = action->conf; - uint32_t dst_width = - 
mlx5_flow_item_field_width(action_modify_field->dst.field); - uint32_t src_width = - mlx5_flow_item_field_width(action_modify_field->src.field); + uint32_t dst_width = mlx5_flow_item_field_width(config, + action_modify_field->dst.field); + uint32_t src_width = mlx5_flow_item_field_width(config, + action_modify_field->src.field); ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); if (ret) @@ -4766,8 +4808,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev, "inner header fields modification" " is not supported"); } - if (action_modify_field->dst.field == - action_modify_field->src.field) + if ((action_modify_field->dst.field == + action_modify_field->src.field) && + (action_modify_field->dst.level == + action_modify_field->src.level)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "source and destination fields" @@ -6086,28 +6130,33 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter) return; cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); MLX5_ASSERT(pool); - /* - * If the counter action is shared by ID, the l3t_clear_entry function - * reduces its references counter. If after the reduction the action is - * still referenced, the function returns here and does not release it. - */ - if (IS_LEGACY_SHARED_CNT(counter) && - mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id)) - return; - /* - * If the counter action is shared by indirect action API, the atomic - * function reduces its references counter. If after the reduction the - * action is still referenced, the function returns here and does not - * release it. - * When the counter action is not shared neither by ID nor by indirect - * action API, shared info is 1 before the reduction, so this condition - * is failed and function doesn't return here. - */ - if (!IS_LEGACY_SHARED_CNT(counter) && - __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED)) - return; - if (pool->is_aged) + if (pool->is_aged) { flow_dv_counter_remove_from_age(dev, counter, cnt); + } else { + /* + * If the counter action is shared by ID, the l3t_clear_entry + * function reduces its references counter. If after the + * reduction the action is still referenced, the function + * returns here and does not release it. + */ + if (IS_LEGACY_SHARED_CNT(counter) && + mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, + cnt->shared_info.id)) + return; + /* + * If the counter action is shared by indirect action API, + * the atomic function reduces its references counter. + * If after the reduction the action is still referenced, the + * function returns here and does not release it. + * When the counter action is not shared neither by ID nor by + * indirect action API, shared info is 1 before the reduction, + * so this condition is failed and function doesn't return here. + */ + if (!IS_LEGACY_SHARED_CNT(counter) && + __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, + __ATOMIC_RELAXED)) + return; + } cnt->pool = pool; /* * Put the counter back to list to be updated in none fallback mode. 
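[Review note] The restructured flow_dv_counter_free() above preserves the last-reference semantics: aged counters take the age-removal path first, while shared counters (by legacy ID or by the indirect action API) each return early as long as other references remain. A minimal, self-contained sketch of that last-reference idiom, using a hypothetical example_counter type rather than the driver's own structures:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical counter type; stands in for the driver's
 * cnt->shared_info.refcnt field. */
struct example_counter {
	uint32_t refcnt;
};

/*
 * Drop one reference. __atomic_sub_fetch() returns the *new* value,
 * so a non-zero result means other users still hold the counter and
 * it must not be recycled -- the same early-return condition used in
 * flow_dv_counter_free() above.
 */
static bool
example_counter_release(struct example_counter *cnt)
{
	return __atomic_sub_fetch(&cnt->refcnt, 1, __ATOMIC_RELAXED) == 0;
}

Because the check is on the decremented value rather than the previous one, a relaxed ordering is sufficient here; only the thread that observes zero proceeds to recycle the object.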
@@ -6627,10 +6676,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, uint32_t rw_act_num = 0; uint64_t is_root; const struct mlx5_flow_tunnel *tunnel; + enum mlx5_tof_rule_type tof_rule_type; struct flow_grp_info grp_info = { .external = !!external, .transfer = !!attr->transfer, .fdb_def_rule = !!priv->fdb_def_rule, + .std_tbl_fix = true, }; const struct rte_eth_hairpin_conf *conf; const struct rte_flow_item *rule_items = items; @@ -6638,23 +6689,22 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, if (items == NULL) return -1; - if (is_flow_tunnel_match_rule(dev, attr, items, actions)) { - tunnel = flow_items_to_tunnel(items); - action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH | - MLX5_FLOW_ACTION_DECAP; - } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) { - tunnel = flow_actions_to_tunnel(actions); - action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET; - } else { - tunnel = NULL; + tunnel = is_tunnel_offload_active(dev) ? + mlx5_get_tof(items, actions, &tof_rule_type) : NULL; + if (tunnel) { + if (priv->representor) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "decap not supported for VF representor"); + if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE) + action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET; + else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE) + action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH | + MLX5_FLOW_ACTION_DECAP; + grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate + (dev, attr, tunnel, tof_rule_type); } - if (tunnel && priv->representor) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "decap not supported " - "for VF representor"); - grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate - (dev, tunnel, attr, items, actions); ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error); if (ret < 0) return ret; @@ -6668,15 +6718,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "item not supported"); switch (type) { - case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL: - if (items[0].type != (typeof(items[0].type)) - MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - NULL, "MLX5 private items " - "must be the first"); - break; case RTE_FLOW_ITEM_TYPE_VOID: break; case RTE_FLOW_ITEM_TYPE_PORT_ID: @@ -6975,6 +7016,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, if (ret < 0) return ret; break; + case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL: + /* tunnel offload item was processed before + * list it here as a supported type + */ + break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, @@ -7516,17 +7562,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, action_flags |= MLX5_FLOW_ACTION_SAMPLE; ++actions_n; break; - case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET: - if (actions[0].type != (typeof(actions[0].type)) - MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, "MLX5 private action " - "must be the first"); - - action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET; - break; case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: ret = flow_dv_validate_action_modify_field(dev, action_flags, @@ -7551,6 +7586,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return ret; action_flags |= MLX5_FLOW_ACTION_CT; break; + case 
MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET: + /* tunnel offload action was processed before + * list it here as a supported type + */ + break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, @@ -9204,27 +9244,14 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev, if (reg < 0) return; MLX5_ASSERT(reg != REG_NON); - /* - * In datapath code there is no endianness - * coversions for perfromance reasons, all - * pattern conversions are done in rte_flow. - */ - value = rte_cpu_to_be_32(value); - mask = rte_cpu_to_be_32(mask); if (reg == REG_C_0) { struct mlx5_priv *priv = dev->data->dev_private; uint32_t msk_c0 = priv->sh->dv_regc0_mask; uint32_t shl_c0 = rte_bsf32(msk_c0); -#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN - uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask); - value >>= shr_c0; - mask >>= shr_c0; -#endif - value <<= shl_c0; + mask &= msk_c0; mask <<= shl_c0; - MLX5_ASSERT(msk_c0); - MLX5_ASSERT(!(~msk_c0 & mask)); + value <<= shl_c0; } flow_dv_match_meta_reg(matcher, key, reg, value, mask); } @@ -11492,38 +11519,35 @@ flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error) } /** - * Create a age action using ASO mechanism. + * Initialize flow ASO age parameters. * * @param[in] dev * Pointer to rte_eth_dev structure. - * @param[in] age - * Pointer to the aging action configuration. - * @param[out] error - * Pointer to the error structure. + * @param[in] age_idx + * Index of ASO age action. + * @param[in] context + * Pointer to flow counter age context. + * @param[in] timeout + * Aging timeout in seconds. * - * @return - * Index to flow counter on success, 0 otherwise. */ -static uint32_t -flow_dv_translate_create_aso_age(struct rte_eth_dev *dev, - const struct rte_flow_action_age *age, - struct rte_flow_error *error) +static void +flow_dv_aso_age_params_init(struct rte_eth_dev *dev, + uint32_t age_idx, + void *context, + uint32_t timeout) { - uint32_t age_idx = 0; struct mlx5_aso_age_action *aso_age; - age_idx = flow_dv_aso_age_alloc(dev, error); - if (!age_idx) - return 0; aso_age = flow_aso_age_get_by_idx(dev, age_idx); - aso_age->age_params.context = age->context; - aso_age->age_params.timeout = age->timeout; + MLX5_ASSERT(aso_age); + aso_age->age_params.context = context; + aso_age->age_params.timeout = timeout; aso_age->age_params.port_id = dev->data->port_id; __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0, __ATOMIC_RELAXED); __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE, __ATOMIC_RELAXED); - return age_idx; } static void @@ -11683,7 +11707,7 @@ flow_dv_prepare_counter(struct rte_eth_dev *dev, } /* - * Release an ASO CT action. + * Release an ASO CT action by its own device. * * @param[in] dev * Pointer to the Ethernet device structure. @@ -11694,12 +11718,12 @@ flow_dv_prepare_counter(struct rte_eth_dev *dev, * 0 when CT action was removed, otherwise the number of references. 
*/ static inline int -flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t idx) +flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng; uint32_t ret; - struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_idx(dev, idx); + struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx); enum mlx5_aso_ct_state state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED); @@ -11728,7 +11752,21 @@ flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t idx) LIST_INSERT_HEAD(&mng->free_cts, ct, next); rte_spinlock_unlock(&mng->ct_sl); } - return ret; + return (int)ret; +} + +static inline int +flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx) +{ + uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx); + uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx); + struct rte_eth_dev *owndev = &rte_eth_devices[owner]; + RTE_SET_USED(dev); + + MLX5_ASSERT(owner < RTE_MAX_ETHPORTS); + if (dev->data->dev_started != 1) + return -1; + return flow_dv_aso_ct_dev_release(owndev, idx); } /* @@ -11880,7 +11918,7 @@ flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error) RTE_SET_USED(reg_c); #endif if (!ct->dr_action_orig) { - flow_dv_aso_ct_release(dev, ct_idx); + flow_dv_aso_ct_dev_release(dev, ct_idx); rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "failed to create ASO CT action"); @@ -11896,7 +11934,7 @@ flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error) reg_c - REG_C_0); #endif if (!ct->dr_action_rply) { - flow_dv_aso_ct_release(dev, ct_idx); + flow_dv_aso_ct_dev_release(dev, ct_idx); rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "failed to create ASO CT action"); @@ -11938,12 +11976,13 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev, return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Failed to allocate CT object"); - ct = flow_aso_ct_get_by_idx(dev, idx); + ct = flow_aso_ct_get_by_dev_idx(dev, idx); if (mlx5_aso_ct_update_by_wqe(sh, ct, pro)) return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Failed to update CT"); ct->is_original = !!pro->is_original_dir; + ct->peer = pro->peer_port; return idx; } @@ -12020,13 +12059,14 @@ flow_dv_translate(struct rte_eth_dev *dev, int tmp_actions_n = 0; uint32_t table; int ret = 0; - const struct mlx5_flow_tunnel *tunnel; + const struct mlx5_flow_tunnel *tunnel = NULL; struct flow_grp_info grp_info = { .external = !!dev_flow->external, .transfer = !!attr->transfer, .fdb_def_rule = !!priv->fdb_def_rule, .skip_scale = dev_flow->skip_scale & (1 << MLX5_SCALE_FLOW_GROUP_BIT), + .std_tbl_fix = true, }; const struct rte_flow_item *head_item = items; @@ -12042,15 +12082,21 @@ flow_dv_translate(struct rte_eth_dev *dev, MLX5DV_FLOW_TABLE_TYPE_NIC_RX; /* update normal path action resource into last index of array */ sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1]; - tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ? - flow_items_to_tunnel(items) : - is_flow_tunnel_steer_rule(dev, attr, items, actions) ? - flow_actions_to_tunnel(actions) : - dev_flow->tunnel ? 
dev_flow->tunnel : NULL; + if (is_tunnel_offload_active(dev)) { + if (dev_flow->tunnel) { + RTE_VERIFY(dev_flow->tof_type == + MLX5_TUNNEL_OFFLOAD_MISS_RULE); + tunnel = dev_flow->tunnel; + } else { + tunnel = mlx5_get_tof(items, actions, + &dev_flow->tof_type); + dev_flow->tunnel = tunnel; + } + grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate + (dev, attr, tunnel, dev_flow->tof_type); + } mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : MLX5DV_FLOW_TABLE_TYPE_NIC_RX; - grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate - (dev, tunnel, attr, items, actions); ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table, &grp_info, error); if (ret) @@ -12060,7 +12106,7 @@ flow_dv_translate(struct rte_eth_dev *dev, mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; /* number of actions must be set to 0 in case of dirty stack. */ mhdr_res->actions_num = 0; - if (is_flow_tunnel_match_rule(dev, attr, items, actions)) { + if (is_flow_tunnel_match_rule(dev_flow->tof_type)) { /* * do not add decap action if match rule drops packet * HW rejects rules with decap & drop @@ -12102,7 +12148,7 @@ flow_dv_translate(struct rte_eth_dev *dev, int action_type = actions->type; const struct rte_flow_action *found_action = NULL; uint32_t jump_group = 0; - uint32_t ct_idx; + uint32_t owner_idx; struct mlx5_aso_ct_action *ct; if (!mlx5_flow_os_action_supported(action_type)) @@ -12558,8 +12604,13 @@ flow_dv_translate(struct rte_eth_dev *dev, action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD; break; case RTE_FLOW_ACTION_TYPE_CONNTRACK: - ct_idx = (uint32_t)(uintptr_t)action->conf; - ct = flow_aso_ct_get_by_idx(dev, ct_idx); + owner_idx = (uint32_t)(uintptr_t)action->conf; + ct = flow_aso_ct_get_by_idx(dev, owner_idx); + if (!ct) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Failed to get CT object."); if (mlx5_aso_ct_available(priv->sh, ct)) return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION, @@ -12572,7 +12623,7 @@ flow_dv_translate(struct rte_eth_dev *dev, dev_flow->dv.actions[actions_n] = ct->dr_action_rply; flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT; - flow->ct = ct_idx; + flow->ct = owner_idx; __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED); actions_n++; action_flags |= MLX5_FLOW_ACTION_CT; @@ -12595,7 +12646,7 @@ flow_dv_translate(struct rte_eth_dev *dev, if ((non_shared_age && count && !count->shared) || !(priv->sh->flow_hit_aso_en && - attr->group)) { + (attr->group || attr->transfer))) { /* Creates age by counters. */ cnt_act = flow_dv_prepare_counter (dev, dev_flow, @@ -12609,17 +12660,17 @@ flow_dv_translate(struct rte_eth_dev *dev, break; } if (!flow->age && non_shared_age) { - flow->age = - flow_dv_translate_create_aso_age - (dev, - non_shared_age, - error); + flow->age = flow_dv_aso_age_alloc + (dev, error); if (!flow->age) - return rte_flow_error_set - (error, rte_errno, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, - "can't create ASO age action"); + return -rte_errno; + flow_dv_aso_age_params_init + (dev, flow->age, + non_shared_age->context ? 
+ non_shared_age->context : + (void *)(uintptr_t) + (dev_flow->flow_idx), + non_shared_age->timeout); } age_act = flow_aso_age_get_by_idx(dev, flow->age); @@ -14181,6 +14232,8 @@ flow_dv_action_create(struct rte_eth_dev *dev, const struct rte_flow_action *action, struct rte_flow_error *err) { + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t age_idx = 0; uint32_t idx = 0; uint32_t ret = 0; @@ -14191,17 +14244,22 @@ flow_dv_action_create(struct rte_eth_dev *dev, MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret; break; case RTE_FLOW_ACTION_TYPE_AGE: - ret = flow_dv_translate_create_aso_age(dev, action->conf, err); - idx = (MLX5_INDIRECT_ACTION_TYPE_AGE << - MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret; - if (ret) { - struct mlx5_aso_age_action *aso_age = - flow_aso_age_get_by_idx(dev, ret); - - if (!aso_age->age_params.context) - aso_age->age_params.context = - (void *)(uintptr_t)idx; + age_idx = flow_dv_aso_age_alloc(dev, err); + if (!age_idx) { + ret = -rte_errno; + break; } + idx = (MLX5_INDIRECT_ACTION_TYPE_AGE << + MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx; + flow_dv_aso_age_params_init(dev, age_idx, + ((const struct rte_flow_action_age *) + action->conf)->context ? + ((const struct rte_flow_action_age *) + action->conf)->context : + (void *)(uintptr_t)idx, + ((const struct rte_flow_action_age *) + action->conf)->timeout); + ret = age_idx; break; case RTE_FLOW_ACTION_TYPE_COUNT: ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL); @@ -14211,8 +14269,7 @@ flow_dv_action_create(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_CONNTRACK: ret = flow_dv_translate_create_conntrack(dev, action->conf, err); - idx = (MLX5_INDIRECT_ACTION_TYPE_CT << - MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret; + idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret); break; default: rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, @@ -14278,7 +14335,9 @@ flow_dv_action_destroy(struct rte_eth_dev *dev, return 0; case MLX5_INDIRECT_ACTION_TYPE_CT: ret = flow_dv_aso_ct_release(dev, idx); - if (ret) + if (ret < 0) + return ret; + if (ret > 0) DRV_LOG(DEBUG, "Connection tracking object %u still " "has references %d.", idx, ret); return 0; @@ -14382,8 +14441,16 @@ __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx, struct mlx5_aso_ct_action *ct; const struct rte_flow_action_conntrack *new_prf; int ret = 0; + uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx); + uint32_t dev_idx; - ct = flow_aso_ct_get_by_idx(dev, idx); + if (PORT_ID(priv) != owner) + return rte_flow_error_set(error, EACCES, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "CT object owned by another port"); + dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx); + ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx); if (!ct->refcnt) return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -14676,12 +14743,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, MLX5_ASSERT(dev_flow.dv.tag_resource); act_cnt->rix_mark = dev_flow.handle->dvh.rix_tag; - if (action_flags & MLX5_FLOW_ACTION_QUEUE) { - dev_flow.handle->rix_hrxq = - mtr_policy->sub_policys[domain][0]->rix_hrxq[i]; - flow_drv_rxq_flags_set(dev, - dev_flow.handle); - } action_flags |= MLX5_FLOW_ACTION_MARK; break; } @@ -14729,12 +14790,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, "set tag action"); act_cnt->modify_hdr = dev_flow.handle->dvh.modify_hdr; - if (action_flags & MLX5_FLOW_ACTION_QUEUE) { - dev_flow.handle->rix_hrxq = - mtr_policy->sub_policys[domain][0]->rix_hrxq[i]; - flow_drv_rxq_flags_set(dev, - 
dev_flow.handle); - } action_flags |= MLX5_FLOW_ACTION_SET_TAG; break; } @@ -14778,41 +14833,20 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, } case RTE_FLOW_ACTION_TYPE_QUEUE: { - struct mlx5_hrxq *hrxq; - uint32_t hrxq_idx; - struct mlx5_flow_rss_desc rss_desc; - struct mlx5_flow_meter_sub_policy *sub_policy = - mtr_policy->sub_policys[domain][0]; - if (i >= MLX5_MTR_RTE_COLORS) return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_POLICY, NULL, "cannot create policy " "fate queue for this color"); - memset(&rss_desc, 0, - sizeof(struct mlx5_flow_rss_desc)); - rss_desc.queue_num = 1; - rss_desc.const_q = act->conf; - hrxq = flow_dv_hrxq_prepare(dev, &dev_flow, - &rss_desc, &hrxq_idx); - if (!hrxq) - return -rte_mtr_error_set(error, - ENOTSUP, - RTE_MTR_ERROR_TYPE_METER_POLICY, - NULL, - "cannot create policy fate queue"); - sub_policy->rix_hrxq[i] = hrxq_idx; + act_cnt->queue = + ((const struct rte_flow_action_queue *) + (act->conf))->index; act_cnt->fate_action = MLX5_FLOW_FATE_QUEUE; dev_flow.handle->fate_action = MLX5_FLOW_FATE_QUEUE; - if (action_flags & MLX5_FLOW_ACTION_MARK || - action_flags & MLX5_FLOW_ACTION_SET_TAG) { - dev_flow.handle->rix_hrxq = hrxq_idx; - flow_drv_rxq_flags_set(dev, - dev_flow.handle); - } + mtr_policy->is_queue = 1; action_flags |= MLX5_FLOW_ACTION_QUEUE; break; } @@ -15074,6 +15108,8 @@ flow_dv_action_query(struct rte_eth_dev *dev, uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_aso_ct_action *ct; + uint16_t owner; + uint32_t dev_idx; switch (type) { case MLX5_INDIRECT_ACTION_TYPE_AGE: @@ -15090,7 +15126,15 @@ flow_dv_action_query(struct rte_eth_dev *dev, case MLX5_INDIRECT_ACTION_TYPE_COUNT: return flow_dv_query_count(dev, idx, data, error); case MLX5_INDIRECT_ACTION_TYPE_CT: - ct = flow_aso_ct_get_by_idx(dev, idx); + owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx); + if (owner != PORT_ID(priv)) + return rte_flow_error_set(error, EACCES, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "CT object owned by another port"); + dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx); + ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx); + MLX5_ASSERT(ct); if (!ct->refcnt) return rte_flow_error_set(error, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -16016,6 +16060,73 @@ rss_sub_policy_error: return NULL; } + +/** + * Destroy the sub policy table with RX queue. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] mtr_policy + * Pointer to meter policy table. 
+ */ +static void +flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev, + struct mlx5_flow_meter_policy *mtr_policy) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter_sub_policy *sub_policy = NULL; + uint32_t domain = MLX5_MTR_DOMAIN_INGRESS; + uint32_t i, j; + uint16_t sub_policy_num, new_policy_num; + + rte_spinlock_lock(&mtr_policy->sl); + for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { + switch (mtr_policy->act_cnt[i].fate_action) { + case MLX5_FLOW_FATE_SHARED_RSS: + sub_policy_num = (mtr_policy->sub_policy_num >> + (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) & + MLX5_MTR_SUB_POLICY_NUM_MASK; + new_policy_num = sub_policy_num; + for (j = 0; j < sub_policy_num; j++) { + sub_policy = + mtr_policy->sub_policys[domain][j]; + if (sub_policy) { + __flow_dv_destroy_sub_policy_rules(dev, + sub_policy); + if (sub_policy != + mtr_policy->sub_policys[domain][0]) { + mtr_policy->sub_policys[domain][j] = + NULL; + mlx5_ipool_free + (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY], + sub_policy->idx); + new_policy_num--; + } + } + } + if (new_policy_num != sub_policy_num) { + mtr_policy->sub_policy_num &= + ~(MLX5_MTR_SUB_POLICY_NUM_MASK << + (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)); + mtr_policy->sub_policy_num |= + (new_policy_num & + MLX5_MTR_SUB_POLICY_NUM_MASK) << + (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain); + } + break; + case MLX5_FLOW_FATE_QUEUE: + sub_policy = mtr_policy->sub_policys[domain][0]; + __flow_dv_destroy_sub_policy_rules(dev, + sub_policy); + break; + default: + /*Other actions without queue and do nothing*/ + break; + } + } + rte_spinlock_unlock(&mtr_policy->sl); +} + /** * Validate the batch counter support in root table. * @@ -16040,7 +16151,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev) .size = sizeof(value.buf), }; struct mlx5dv_flow_matcher_attr dv_attr = { - .type = IBV_FLOW_ATTR_NORMAL, + .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS, .priority = 0, .match_criteria_enable = 0, .match_mask = (void *)&mask, @@ -16052,7 +16163,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev) void *flow = NULL; int ret = -1; - tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, + tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL, 0, 0, 0, NULL); if (!tbl) goto err; @@ -16063,14 +16174,12 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev) &actions[0]); if (ret) goto err; - actions[1] = sh->dr_drop_action ? sh->dr_drop_action : - priv->drop_queue.hrxq->action; dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf); ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj, &matcher); if (ret) goto err; - ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2, + ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1, actions, &flow); err: /* @@ -16627,6 +16736,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { .create_def_policy = flow_dv_create_def_policy, .destroy_def_policy = flow_dv_destroy_def_policy, .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare, + .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq, .counter_alloc = flow_dv_counter_allocate, .counter_free = flow_dv_counter_free, .counter_query = flow_dv_counter_query,