unsigned int off_b;
uint32_t mask;
uint32_t data;
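+ /*
+ * A source field and its destination may differ in width, so the
+ * two iterators below may need to advance independently.
+ */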
+ bool next_field = true;
+ bool next_dcopy = true;
if (i >= MLX5_MAX_MODIFY_NUM)
return rte_flow_error_set(error, EINVAL,
size_b = sizeof(uint32_t) * CHAR_BIT -
off_b - __builtin_clz(mask);
MLX5_ASSERT(size_b);
- size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
actions[i] = (struct mlx5_modification_cmd) {
.action_type = type,
.field = field->id,
.offset = off_b,
- .length = size_b,
+ .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
+ 0 : size_b,
};
- /* Convert entire record to expected big-endian format. */
- actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
if (type == MLX5_MODIFICATION_TYPE_COPY) {
MLX5_ASSERT(dcopy);
actions[i].dst_field = dcopy->id;
(int)dcopy->offset < 0 ? off_b : dcopy->offset;
/* Convert entire record to big-endian format. */
actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
- ++dcopy;
+ /*
+ * Destination field overflow. Copy leftovers of
+ * a source field to the next destination field.
+ */
+ if ((size_b > dcopy->size * CHAR_BIT) && dcopy->size) {
+ actions[i].length = dcopy->size * CHAR_BIT;
+ field->offset += dcopy->size;
+ next_field = false;
+ }
+ /*
+ * Not enough bits in a source field to fill a
+ * destination field. Switch to the next source.
+ */
+ if (dcopy->size > field->size &&
+ (size_b == field->size * CHAR_BIT)) {
+ actions[i].length = field->size * CHAR_BIT;
+ dcopy->offset += field->size * CHAR_BIT;
+ next_dcopy = false;
+ }
+ if (next_dcopy)
+ ++dcopy;
} else {
MLX5_ASSERT(item->spec);
data = flow_dv_fetch_field((const uint8_t *)item->spec +
data = (data & mask) >> off_b;
actions[i].data1 = rte_cpu_to_be_32(data);
}
+ /* Convert entire record to expected big-endian format. */
+ actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+ if (next_field)
+ ++field;
++i;
- ++field;
} while (field->size);
if (resource->actions_num == i)
return rte_flow_error_set(error, EINVAL,
const struct rte_flow_action_set_meta *conf,
struct rte_flow_error *error)
{
- uint32_t data = conf->data;
- uint32_t mask = conf->mask;
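+ /*
+ * The modify-header routine expects the parameters to be in
+ * big-endian form already, so convert and mask them up front.
+ */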
+ uint32_t mask = rte_cpu_to_be_32(conf->mask);
+ uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
struct rte_flow_item item = {
.spec = &data,
.mask = &mask,
if (reg < 0)
return reg;
MLX5_ASSERT(reg != REG_NON);
- /*
- * In datapath code there is no endianness
- * coversions for perfromance reasons, all
- * pattern conversions are done in rte_flow.
- */
if (reg == REG_C_0) {
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t msk_c0 = priv->sh->dv_regc0_mask;
- uint32_t shl_c0;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
- MLX5_ASSERT(msk_c0);
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- shl_c0 = rte_bsf32(msk_c0);
-#else
- shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
-#endif
- mask <<= shl_c0;
- data <<= shl_c0;
- MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
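+ /*
+ * REG_C_0 is shared with other features, so shift the value and
+ * mask into the bits reserved for metadata by dv_regc0_mask.
+ */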
+ data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
+ mask = rte_cpu_to_be_32(mask) & msk_c0;
+ mask = rte_cpu_to_be_32(mask << shl_c0);
}
reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
/* The routine expects parameters in memory as big-endian ones. */
}
static int
-mlx5_flow_item_field_width(enum rte_flow_field_id field)
+mlx5_flow_item_field_width(struct mlx5_dev_config *config,
+ enum rte_flow_field_id field)
{
switch (field) {
case RTE_FLOW_FIELD_START:
case RTE_FLOW_FIELD_MARK:
return 24;
case RTE_FLOW_FIELD_META:
- return 32;
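+ /*
+ * META width depends on the dv_xmeta_en mode: 16 bits for
+ * META16, 32 bits for META32, otherwise metadata
+ * modification is not supported (width 0).
+ */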
+ if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
+ return 16;
+ else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
+ return 32;
+ else
+ return 0;
case RTE_FLOW_FIELD_POINTER:
case RTE_FLOW_FIELD_VALUE:
return 64;
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
uint32_t idx = 0;
+ uint32_t off = 0;
uint64_t val = 0;
switch (data->field) {
case RTE_FLOW_FIELD_START:
MLX5_ASSERT(false);
break;
case RTE_FLOW_FIELD_MAC_DST:
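+ /* Offset within the DMAC_47_16 word, past the low 16 bits. */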
+ off = data->offset > 16 ? data->offset - 16 : 0;
if (mask) {
- if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DMAC_47_16};
- if (width < 32) {
- mask[idx] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ if (data->offset < 16) {
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_DMAC_15_0};
+ if (width < 16) {
+ mask[idx] = rte_cpu_to_be_16(0xffff >>
+ (16 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
- width -= 32;
+ mask[idx] = RTE_BE16(0xffff);
+ width -= 16;
}
if (!width)
break;
++idx;
}
- info[idx] = (struct field_modify_info){2, 4 * idx,
- MLX5_MODI_OUT_DMAC_15_0};
- mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
- } else {
- if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx] = (struct field_modify_info){4, 4 * idx,
MLX5_MODI_OUT_DMAC_47_16};
- info[idx] = (struct field_modify_info){2, 0,
+ mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
+ } else {
+ if (data->offset < 16)
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_DMAC_15_0};
+ info[idx] = (struct field_modify_info){4, off,
+ MLX5_MODI_OUT_DMAC_47_16};
}
break;
case RTE_FLOW_FIELD_MAC_SRC:
+ off = data->offset > 16 ? data->offset - 16 : 0;
if (mask) {
- if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SMAC_47_16};
- if (width < 32) {
- mask[idx] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ if (data->offset < 16) {
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_SMAC_15_0};
+ if (width < 16) {
+ mask[idx] = rte_cpu_to_be_16(0xffff >>
+ (16 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
- width -= 32;
+ mask[idx] = RTE_BE16(0xffff);
+ width -= 16;
}
if (!width)
break;
++idx;
}
- info[idx] = (struct field_modify_info){2, 4 * idx,
- MLX5_MODI_OUT_SMAC_15_0};
- mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
- } else {
- if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx] = (struct field_modify_info){4, 4 * idx,
MLX5_MODI_OUT_SMAC_47_16};
- info[idx] = (struct field_modify_info){2, 0,
+ mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
+ } else {
+ if (data->offset < 16)
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_SMAC_15_0};
+ info[idx] = (struct field_modify_info){4, off,
+ MLX5_MODI_OUT_SMAC_47_16};
}
break;
case RTE_FLOW_FIELD_VLAN_TYPE:
break;
case RTE_FLOW_FIELD_META:
{
+ unsigned int xmeta = config->dv_xmeta_en;
int reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return;
MLX5_ASSERT(reg != REG_NON);
MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
- info[idx] = (struct field_modify_info){4, 0,
- reg_to_field[reg]};
- if (mask)
- mask[idx] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ if (xmeta == MLX5_XMETA_MODE_META16) {
+ info[idx] = (struct field_modify_info){2, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_16(0xffff >>
+ (16 - width));
+ } else if (xmeta == MLX5_XMETA_MODE_META32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ } else {
+ MLX5_ASSERT(false);
+ }
}
break;
case RTE_FLOW_FIELD_POINTER:
val = data->value;
for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
if (mask[idx]) {
- if (dst_width > 16) {
+ if (dst_width == 48) {
+ /* Special case for 48-bit MAC addresses. */
+ value[idx] = rte_cpu_to_be_16(val);
+ val >>= 16;
+ dst_width -= 16;
+ } else if (dst_width > 16) {
value[idx] = rte_cpu_to_be_32(val);
val >>= 32;
} else if (dst_width > 8) {
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
const struct rte_flow_action_modify_field *conf =
(const struct rte_flow_action_modify_field *)(action->conf);
struct rte_flow_item item;
uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
uint32_t type;
- uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
+ uint32_t dst_width = mlx5_flow_item_field_width(config,
+ conf->dst.field);
if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
conf->src.field == RTE_FLOW_FIELD_VALUE) {
"specified range not supported");
}
+/*
+ * Validate ASO CT item.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Pointer to bit-fields that holds the items detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_conntrack *spec = item->spec;
+ const struct rte_flow_item_conntrack *mask = item->mask;
+ RTE_SET_USED(dev);
+ uint32_t flags;
+
+ if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Only one CT is supported");
+ if (!mask)
+ mask = &rte_flow_item_conntrack_mask;
+ flags = spec->flags & mask->flags;
+ if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
+ ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
+ (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
+ (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Conflict status bits");
+ /* State change also needs to be considered. */
+ *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
+ return 0;
+}
+
/**
* Validate the pop VLAN action.
*
return 0;
}
+/*
+ * Validate the ASO CT action.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] item_flags
+ * The items found in this flow rule.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ uint64_t item_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ RTE_SET_USED(dev);
+
+ if (attr->group == 0 && !attr->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Only support non-root table");
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "CT cannot follow a fate action");
+ if ((action_flags & MLX5_FLOW_ACTION_METER) ||
+ (action_flags & MLX5_FLOW_ACTION_AGE))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Only one ASO action is supported");
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Encap cannot exist before CT");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Not a outer TCP packet");
+ return 0;
+}
+
/**
* Match encap_decap resource.
*
struct mlx5_dev_config *config = &priv->config;
const struct rte_flow_action_modify_field *action_modify_field =
action->conf;
- uint32_t dst_width =
- mlx5_flow_item_field_width(action_modify_field->dst.field);
- uint32_t src_width =
- mlx5_flow_item_field_width(action_modify_field->src.field);
+ uint32_t dst_width = mlx5_flow_item_field_width(config,
+ action_modify_field->dst.field);
+ uint32_t src_width = mlx5_flow_item_field_width(config,
+ action_modify_field->src.field);
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (ret)
"inner header fields modification"
" is not supported");
}
- if (action_modify_field->dst.field ==
- action_modify_field->src.field)
+ if ((action_modify_field->dst.field ==
+ action_modify_field->src.field) &&
+ (action_modify_field->dst.level ==
+ action_modify_field->src.level))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"source and destination fields"
"E-Switch must has a dest "
"port for mirroring");
if (!priv->config.hca_attr.reg_c_preserve &&
- priv->representor_id != -1)
+ priv->representor_id != UINT16_MAX)
*fdb_mirror_limit = 1;
}
/* Continue validation for Xcap actions.*/
return;
cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
MLX5_ASSERT(pool);
- /*
- * If the counter action is shared by ID, the l3t_clear_entry function
- * reduces its references counter. If after the reduction the action is
- * still referenced, the function returns here and does not release it.
- */
- if (IS_LEGACY_SHARED_CNT(counter) &&
- mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
- return;
- /*
- * If the counter action is shared by indirect action API, the atomic
- * function reduces its references counter. If after the reduction the
- * action is still referenced, the function returns here and does not
- * release it.
- * When the counter action is not shared neither by ID nor by indirect
- * action API, shared info is 1 before the reduction, so this condition
- * is failed and function doesn't return here.
- */
- if (!IS_LEGACY_SHARED_CNT(counter) &&
- __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED))
- return;
- if (pool->is_aged)
+ if (pool->is_aged) {
flow_dv_counter_remove_from_age(dev, counter, cnt);
+ } else {
+ /*
+ * If the counter action is shared by ID, the l3t_clear_entry
+ * function reduces its references counter. If after the
+ * reduction the action is still referenced, the function
+ * returns here and does not release it.
+ */
+ if (IS_LEGACY_SHARED_CNT(counter) &&
+ mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
+ cnt->shared_info.id))
+ return;
+ /*
+ * If the counter action is shared by indirect action API,
+ * the atomic function reduces its references counter.
+ * If after the reduction the action is still referenced, the
+ * function returns here and does not release it.
+ * When the counter action is shared neither by ID nor by the
+ * indirect action API, shared info is 1 before the reduction,
+ * so this condition fails and the function doesn't return here.
+ */
+ if (!IS_LEGACY_SHARED_CNT(counter) &&
+ __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
+ __ATOMIC_RELAXED))
+ return;
+ }
cnt->pool = pool;
/*
* Put the counter back to list to be updated in none fallback mode.
uint32_t rw_act_num = 0;
uint64_t is_root;
const struct mlx5_flow_tunnel *tunnel;
+ enum mlx5_tof_rule_type tof_rule_type;
struct flow_grp_info grp_info = {
.external = !!external,
.transfer = !!attr->transfer,
.fdb_def_rule = !!priv->fdb_def_rule,
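+ /* May be overridden below for tunnel offload rules. */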
+ .std_tbl_fix = true,
};
const struct rte_eth_hairpin_conf *conf;
const struct rte_flow_item *rule_items = items;
if (items == NULL)
return -1;
- if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
- tunnel = flow_items_to_tunnel(items);
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
- MLX5_FLOW_ACTION_DECAP;
- } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
- tunnel = flow_actions_to_tunnel(actions);
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
- } else {
- tunnel = NULL;
+ tunnel = is_tunnel_offload_active(dev) ?
+ mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
+ if (tunnel) {
+ if (priv->representor)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "decap not supported for VF representor");
+ if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
+ MLX5_FLOW_ACTION_DECAP;
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, attr, tunnel, tof_rule_type);
}
- if (tunnel && priv->representor)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "decap not supported "
- "for VF representor");
- grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
- (dev, tunnel, attr, items, actions);
ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
if (ret < 0)
return ret;
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
switch (type) {
- case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
- if (items[0].type != (typeof(items[0].type))
- MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL, "MLX5 private items "
- "must be the first");
- break;
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
return ret;
last_item = MLX5_FLOW_ITEM_INTEGRITY;
break;
+ case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+ ret = flow_dv_validate_item_aso_ct(dev, items,
+ &item_flags, error);
+ if (ret < 0)
+ return ret;
+ break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
+ /* Tunnel offload item was processed before;
+ * list it here as a supported type.
+ */
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
action_flags |= MLX5_FLOW_ACTION_SAMPLE;
++actions_n;
break;
- case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
- if (actions[0].type != (typeof(actions[0].type))
- MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "MLX5 private action "
- "must be the first");
-
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
- break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
ret = flow_dv_validate_action_modify_field(dev,
action_flags,
if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
modify_after_mirror = 1;
/* Count all modify-header actions as one action. */
- if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
rw_act_num += ret;
break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ ret = flow_dv_validate_action_aso_ct(dev, action_flags,
+ item_flags, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_CT;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ /* Tunnel offload action was processed before;
+ * list it here as a supported type.
+ */
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
if (reg < 0)
return;
MLX5_ASSERT(reg != REG_NON);
- /*
- * In datapath code there is no endianness
- * coversions for perfromance reasons, all
- * pattern conversions are done in rte_flow.
- */
- value = rte_cpu_to_be_32(value);
- mask = rte_cpu_to_be_32(mask);
if (reg == REG_C_0) {
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t msk_c0 = priv->sh->dv_regc0_mask;
uint32_t shl_c0 = rte_bsf32(msk_c0);
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
- uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
- value >>= shr_c0;
- mask >>= shr_c0;
-#endif
- value <<= shl_c0;
+ mask &= msk_c0;
mask <<= shl_c0;
- MLX5_ASSERT(msk_c0);
- MLX5_ASSERT(!(~msk_c0 & mask));
+ value <<= shl_c0;
}
flow_dv_match_meta_reg(matcher, key, reg, value, mask);
}
}
}
+/*
+ * Add connection tracking status item to matcher
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ uint32_t reg_value = 0;
+ int reg_id;
+ /* 8 LSBs: 0b11000011, the middle 4 bits are reserved. */
+ uint32_t reg_mask = 0;
+ const struct rte_flow_item_conntrack *spec = item->spec;
+ const struct rte_flow_item_conntrack *mask = item->mask;
+ uint32_t flags;
+ struct rte_flow_error error;
+
+ if (!mask)
+ mask = &rte_flow_item_conntrack_mask;
+ if (!spec || !mask->flags)
+ return;
+ flags = spec->flags & mask->flags;
+ /* The conflict should be checked in the validation. */
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
+ reg_value |= MLX5_CT_SYNDROME_VALID;
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
+ reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
+ reg_value |= MLX5_CT_SYNDROME_INVALID;
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
+ reg_value |= MLX5_CT_SYNDROME_TRAP;
+ if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
+ reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
+ if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
+ RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
+ RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
+ reg_mask |= 0xc0;
+ if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
+ reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
+ if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
+ reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
+ /* The REG_C_x value could be saved during startup. */
+ reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
+ if (reg_id == REG_NON)
+ return;
+ flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
+ reg_value, reg_mask);
+}
+
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
#define HEADER_IS_ZERO(match_criteria, headers) \
}
/**
- * Create a age action using ASO mechanism.
+ * Initialize flow ASO age parameters.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
- * @param[in] age
- * Pointer to the aging action configuration.
- * @param[out] error
- * Pointer to the error structure.
+ * @param[in] age_idx
+ * Index of ASO age action.
+ * @param[in] context
+ * Pointer to flow counter age context.
+ * @param[in] timeout
+ * Aging timeout in seconds.
*
- * @return
- * Index to flow counter on success, 0 otherwise.
*/
-static uint32_t
-flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
- const struct rte_flow_action_age *age,
- struct rte_flow_error *error)
+static void
+flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
+ uint32_t age_idx,
+ void *context,
+ uint32_t timeout)
{
- uint32_t age_idx = 0;
struct mlx5_aso_age_action *aso_age;
- age_idx = flow_dv_aso_age_alloc(dev, error);
- if (!age_idx)
- return 0;
aso_age = flow_aso_age_get_by_idx(dev, age_idx);
- aso_age->age_params.context = age->context;
- aso_age->age_params.timeout = age->timeout;
+ MLX5_ASSERT(aso_age);
+ aso_age->age_params.context = context;
+ aso_age->age_params.timeout = timeout;
aso_age->age_params.port_id = dev->data->port_id;
__atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
__ATOMIC_RELAXED);
__atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
__ATOMIC_RELAXED);
- return age_idx;
}
static void
return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
}
+/*
+ * Release an ASO CT action via its owner device.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * Index of ASO CT action to release.
+ *
+ * @return
+ * 0 when CT action was removed, otherwise the number of references.
+ */
+static inline int
+flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+ uint32_t ret;
+ struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
+ enum mlx5_aso_ct_state state =
+ __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+
+ /* Cannot release when CT is in the ASO SQ. */
+ if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
+ return -1;
+ ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ if (!ret) {
+ if (ct->dr_action_orig) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+ claim_zero(mlx5_glue->destroy_flow_action
+ (ct->dr_action_orig));
+#endif
+ ct->dr_action_orig = NULL;
+ }
+ if (ct->dr_action_rply) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+ claim_zero(mlx5_glue->destroy_flow_action
+ (ct->dr_action_rply));
+#endif
+ ct->dr_action_rply = NULL;
+ }
+ /* Clear the state to free, no need in 1st allocation. */
+ MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
+ rte_spinlock_lock(&mng->ct_sl);
+ LIST_INSERT_HEAD(&mng->free_cts, ct, next);
+ rte_spinlock_unlock(&mng->ct_sl);
+ }
+ return (int)ret;
+}
+
+static inline int
+flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
+{
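+ /* The handle embeds the owner port ID, release via the owner device. */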
+ uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
+ uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
+ struct rte_eth_dev *owndev = &rte_eth_devices[owner];
+ RTE_SET_USED(dev);
+
+ MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
+ if (dev->data->dev_started != 1)
+ return -1;
+ return flow_dv_aso_ct_dev_release(owndev, idx);
+}
+
+/*
+ * Resize the ASO CT pools array by 64 pools.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int
+flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+ void *old_pools = mng->pools;
+ /* Magic number for now, should become a macro. */
+ uint32_t resize = mng->n + 64;
+ uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
+ void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
+
+ if (!pools) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ rte_rwlock_write_lock(&mng->resize_rwl);
+ /* ASO SQ/QP was already initialized in the startup. */
+ if (old_pools) {
+ /* Realloc could be an alternative choice. */
+ rte_memcpy(pools, old_pools,
+ mng->n * sizeof(struct mlx5_aso_ct_pool *));
+ mlx5_free(old_pools);
+ }
+ mng->n = resize;
+ mng->pools = pools;
+ rte_rwlock_write_unlock(&mng->resize_rwl);
+ return 0;
+}
+
+/*
+ * Create and initialize a new ASO CT pool.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] ct_free
+ * Where to put the pointer of a new CT action.
+ *
+ * @return
+ * The CT actions pool pointer and @p ct_free is set on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_aso_ct_pool *
+flow_dv_ct_pool_create(struct rte_eth_dev *dev,
+ struct mlx5_aso_ct_action **ct_free)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+ struct mlx5_aso_ct_pool *pool = NULL;
+ struct mlx5_devx_obj *obj = NULL;
+ uint32_t i;
+ uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
+
+ obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
+ priv->sh->pdn, log_obj_size);
+ if (!obj) {
+ rte_errno = ENODATA;
+ DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
+ return NULL;
+ }
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+ if (!pool) {
+ rte_errno = ENOMEM;
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ return NULL;
+ }
+ pool->devx_obj = obj;
+ pool->index = mng->next;
+ /* Resize pools array if there is no room for the new pool in it. */
+ if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ mlx5_free(pool);
+ return NULL;
+ }
+ mng->pools[pool->index] = pool;
+ mng->next++;
+ /* Assign the first action in the new pool, the rest go to the free list. */
+ *ct_free = &pool->actions[0];
+ /* The caller holds the lock, so the list operation is safe here. */
+ for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
+ /* refcnt is 0 when allocating the memory. */
+ pool->actions[i].offset = i;
+ LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
+ }
+ return pool;
+}
+
+/*
+ * Allocate an ASO CT action from the free list.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Index to ASO CT action on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+ struct mlx5_aso_ct_action *ct = NULL;
+ struct mlx5_aso_ct_pool *pool;
+ uint8_t reg_c;
+ uint32_t ct_idx;
+
+ MLX5_ASSERT(mng);
+ if (!priv->config.devx) {
+ rte_errno = ENOTSUP;
+ return 0;
+ }
+ /* Get a free CT action, if no, a new pool will be created. */
+ rte_spinlock_lock(&mng->ct_sl);
+ ct = LIST_FIRST(&mng->free_cts);
+ if (ct) {
+ LIST_REMOVE(ct, next);
+ } else if (!flow_dv_ct_pool_create(dev, &ct)) {
+ rte_spinlock_unlock(&mng->ct_sl);
+ rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "failed to create ASO CT pool");
+ return 0;
+ }
+ rte_spinlock_unlock(&mng->ct_sl);
+ pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+ ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
+ /* 0: inactive, 1: created, 2+: used by flows. */
+ __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
+ if (!ct->dr_action_orig) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+ ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
+ (priv->sh->rx_domain, pool->devx_obj->obj,
+ ct->offset,
+ MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
+ reg_c - REG_C_0);
+#else
+ RTE_SET_USED(reg_c);
+#endif
+ if (!ct->dr_action_orig) {
+ flow_dv_aso_ct_dev_release(dev, ct_idx);
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "failed to create ASO CT action");
+ return 0;
+ }
+ }
+ if (!ct->dr_action_rply) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+ ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
+ (priv->sh->rx_domain, pool->devx_obj->obj,
+ ct->offset,
+ MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
+ reg_c - REG_C_0);
+#endif
+ if (!ct->dr_action_rply) {
+ flow_dv_aso_ct_dev_release(dev, ct_idx);
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "failed to create ASO CT action");
+ return 0;
+ }
+ }
+ return ct_idx;
+}
+
+/*
+ * Create a conntrack object with context and actions using the ASO mechanism.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] pro
+ * Pointer to conntrack information profile.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Index to conntrack object on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
+ const struct rte_flow_action_conntrack *pro,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_aso_ct_action *ct;
+ uint32_t idx;
+
+ if (!sh->ct_aso_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Connection is not supported");
+ idx = flow_dv_aso_ct_alloc(dev, error);
+ if (!idx)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Failed to allocate CT object");
+ ct = flow_aso_ct_get_by_dev_idx(dev, idx);
+ if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Failed to update CT");
+ ct->is_original = !!pro->is_original_dir;
+ ct->peer = pro->peer_port;
+ return idx;
+}
+
/**
* Fill the flow with DV spec, lock free
* (mutex should be acquired by caller).
int tmp_actions_n = 0;
uint32_t table;
int ret = 0;
- const struct mlx5_flow_tunnel *tunnel;
+ const struct mlx5_flow_tunnel *tunnel = NULL;
struct flow_grp_info grp_info = {
.external = !!dev_flow->external,
.transfer = !!attr->transfer,
.fdb_def_rule = !!priv->fdb_def_rule,
.skip_scale = dev_flow->skip_scale &
(1 << MLX5_SCALE_FLOW_GROUP_BIT),
+ .std_tbl_fix = true,
};
const struct rte_flow_item *head_item = items;
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
/* update normal path action resource into last index of array */
sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
- tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
- flow_items_to_tunnel(items) :
- is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
- flow_actions_to_tunnel(actions) :
- dev_flow->tunnel ? dev_flow->tunnel : NULL;
+ if (is_tunnel_offload_active(dev)) {
+ if (dev_flow->tunnel) {
+ RTE_VERIFY(dev_flow->tof_type ==
+ MLX5_TUNNEL_OFFLOAD_MISS_RULE);
+ tunnel = dev_flow->tunnel;
+ } else {
+ tunnel = mlx5_get_tof(items, actions,
+ &dev_flow->tof_type);
+ dev_flow->tunnel = tunnel;
+ }
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, attr, tunnel, dev_flow->tof_type);
+ }
mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
- grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
- (dev, tunnel, attr, items, actions);
ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
&grp_info, error);
if (ret)
mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
/* number of actions must be set to 0 in case of dirty stack. */
mhdr_res->actions_num = 0;
- if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+ if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
/*
* do not add decap action if match rule drops packet
* HW rejects rules with decap & drop
int action_type = actions->type;
const struct rte_flow_action *found_action = NULL;
uint32_t jump_group = 0;
+ uint32_t owner_idx;
+ struct mlx5_aso_ct_action *ct;
if (!mlx5_flow_os_action_supported(action_type))
return rte_flow_error_set(error, ENOTSUP,
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ owner_idx = (uint32_t)(uintptr_t)action->conf;
+ ct = flow_aso_ct_get_by_idx(dev, owner_idx);
+ if (!ct)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Failed to get CT object.");
+ if (mlx5_aso_ct_available(priv->sh, ct))
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "CT is unavailable.");
+ if (ct->is_original)
+ dev_flow->dv.actions[actions_n] =
+ ct->dr_action_orig;
+ else
+ dev_flow->dv.actions[actions_n] =
+ ct->dr_action_rply;
+ flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
+ flow->ct = owner_idx;
+ __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_CT;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (mhdr_res->actions_num) {
if ((non_shared_age &&
count && !count->shared) ||
!(priv->sh->flow_hit_aso_en &&
- attr->group)) {
+ (attr->group || attr->transfer))) {
/* Creates age by counters. */
cnt_act = flow_dv_prepare_counter
(dev, dev_flow,
break;
}
if (!flow->age && non_shared_age) {
- flow->age =
- flow_dv_translate_create_aso_age
- (dev,
- non_shared_age,
- error);
+ flow->age = flow_dv_aso_age_alloc
+ (dev, error);
if (!flow->age)
- return rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "can't create ASO age action");
+ return -rte_errno;
+ flow_dv_aso_age_params_init
+ (dev, flow->age,
+ non_shared_age->context ?
+ non_shared_age->context :
+ (void *)(uintptr_t)
+ (dev_flow->flow_idx),
+ non_shared_age->timeout);
}
age_act = flow_aso_age_get_by_idx(dev,
flow->age);
match_value,
head_item, items);
break;
+ case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+ flow_dv_translate_item_aso_ct(dev, match_mask,
+ match_value, items);
+ break;
default:
break;
}
mlx5_flow_meter_detach(priv, fm);
flow->meter = 0;
}
- if (flow->age)
+ /* Keep the current age handling by default. */
+ if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
+ flow_dv_aso_ct_release(dev, flow->ct);
+ else if (flow->age)
flow_dv_aso_age_release(dev, flow->age);
if (flow->geneve_tlv_option) {
flow_dv_geneve_tlv_option_resource_release(dev);
const struct rte_flow_action *action,
struct rte_flow_error *err)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t age_idx = 0;
uint32_t idx = 0;
uint32_t ret = 0;
MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
- ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
- idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
- MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
- if (ret) {
- struct mlx5_aso_age_action *aso_age =
- flow_aso_age_get_by_idx(dev, ret);
-
- if (!aso_age->age_params.context)
- aso_age->age_params.context =
- (void *)(uintptr_t)idx;
+ age_idx = flow_dv_aso_age_alloc(dev, err);
+ if (!age_idx) {
+ ret = -rte_errno;
+ break;
}
+ idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
+ MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
+ flow_dv_aso_age_params_init(dev, age_idx,
+ ((const struct rte_flow_action_age *)
+ action->conf)->context ?
+ ((const struct rte_flow_action_age *)
+ action->conf)->context :
+ (void *)(uintptr_t)idx,
+ ((const struct rte_flow_action_age *)
+ action->conf)->timeout);
+ ret = age_idx;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ ret = flow_dv_translate_create_conntrack(dev, action->conf,
+ err);
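+ /* The handle encodes both the owner port ID and the index. */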
+ idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
+ break;
default:
rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "action type not supported");
DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
" released with references %d.", idx, ret);
return 0;
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ ret = flow_dv_aso_ct_release(dev, idx);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ DRV_LOG(DEBUG, "Connection tracking object %u still "
+ "has references %d.", idx, ret);
+ return 0;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
return ret;
}
+/*
+ * Update a conntrack object's context or direction in place.
+ * Context update should be synchronized.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * The conntrack object ID to be updated.
+ * @param[in] update
+ * Pointer to the structure of information to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
+ const struct rte_flow_modify_conntrack *update,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_action *ct;
+ const struct rte_flow_action_conntrack *new_prf;
+ int ret = 0;
+ uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
+ uint32_t dev_idx;
+
+ if (PORT_ID(priv) != owner)
+ return rte_flow_error_set(error, EACCES,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "CT object owned by another port");
+ dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
+ ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
+ if (!ct->refcnt)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "CT object is inactive");
+ new_prf = &update->new_ct;
+ if (update->direction)
+ ct->is_original = !!new_prf->is_original_dir;
+ if (update->state) {
+ /* Only validate the profile when it needs to be updated. */
+ ret = mlx5_validate_action_ct(dev, new_prf, error);
+ if (ret)
+ return ret;
+ ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
+ if (ret)
+ return rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to send CT context update WQE");
+ /* Block until ready or a failure. */
+ ret = mlx5_aso_ct_available(priv->sh, ct);
+ if (ret)
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Timeout to get the CT update");
+ }
+ return ret;
+}
+
/**
* Updates in place shared action configuration, lock free,
* (mutex should be acquired by caller).
case MLX5_INDIRECT_ACTION_TYPE_RSS:
action_conf = ((const struct rte_flow_action *)update)->conf;
return __flow_dv_action_rss_update(dev, idx, action_conf, err);
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ return __flow_dv_action_ct_update(dev, idx, update, err);
default:
return rte_flow_error_set(err, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
MLX5_ASSERT(dev_flow.dv.tag_resource);
act_cnt->rix_mark =
dev_flow.handle->dvh.rix_tag;
- if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
- dev_flow.handle->rix_hrxq =
- mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
action_flags |= MLX5_FLOW_ACTION_MARK;
break;
}
"set tag action");
act_cnt->modify_hdr =
dev_flow.handle->dvh.modify_hdr;
- if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
- dev_flow.handle->rix_hrxq =
- mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
}
}
case RTE_FLOW_ACTION_TYPE_QUEUE:
{
- struct mlx5_hrxq *hrxq;
- uint32_t hrxq_idx;
- struct mlx5_flow_rss_desc rss_desc;
- struct mlx5_flow_meter_sub_policy *sub_policy =
- mtr_policy->sub_policys[domain][0];
-
if (i >= MLX5_MTR_RTE_COLORS)
return -rte_mtr_error_set(error,
ENOTSUP,
RTE_MTR_ERROR_TYPE_METER_POLICY,
NULL, "cannot create policy "
"fate queue for this color");
- memset(&rss_desc, 0,
- sizeof(struct mlx5_flow_rss_desc));
- rss_desc.queue_num = 1;
- rss_desc.const_q = act->conf;
- hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
- &rss_desc, &hrxq_idx);
- if (!hrxq)
- return -rte_mtr_error_set(error,
- ENOTSUP,
- RTE_MTR_ERROR_TYPE_METER_POLICY,
- NULL,
- "cannot create policy fate queue");
- sub_policy->rix_hrxq[i] = hrxq_idx;
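+ /*
+ * Keep only the queue index, the queue object is created
+ * later, when the policy table is applied.
+ */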
+ act_cnt->queue =
+ ((const struct rte_flow_action_queue *)
+ (act->conf))->index;
act_cnt->fate_action =
MLX5_FLOW_FATE_QUEUE;
dev_flow.handle->fate_action =
MLX5_FLOW_FATE_QUEUE;
- if (action_flags & MLX5_FLOW_ACTION_MARK ||
- action_flags & MLX5_FLOW_ACTION_SET_TAG) {
- dev_flow.handle->rix_hrxq = hrxq_idx;
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
+ mtr_policy->is_queue = 1;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
break;
}
uint32_t act_idx = (uint32_t)(uintptr_t)handle;
uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_action *ct;
+ uint16_t owner;
+ uint32_t dev_idx;
switch (type) {
case MLX5_INDIRECT_ACTION_TYPE_AGE:
return 0;
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return flow_dv_query_count(dev, idx, data, error);
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
+ if (owner != PORT_ID(priv))
+ return rte_flow_error_set(error, EACCES,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "CT object owned by another port");
+ dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
+ ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
+ MLX5_ASSERT(ct);
+ if (!ct->refcnt)
+ return rte_flow_error_set(error, EFAULT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "CT object is inactive");
+ ((struct rte_flow_action_conntrack *)data)->peer_port =
+ ct->peer;
+ ((struct rte_flow_action_conntrack *)data)->is_original_dir =
+ ct->is_original;
+ if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
+ return rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to query CT context");
+ return 0;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
return NULL;
}
+
+/**
+ * Destroy the sub-policy tables that use RX queues.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ */
+static void
+flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+ uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+ uint32_t i, j;
+ uint16_t sub_policy_num, new_policy_num;
+
+ rte_spinlock_lock(&mtr_policy->sl);
+ for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ switch (mtr_policy->act_cnt[i].fate_action) {
+ case MLX5_FLOW_FATE_SHARED_RSS:
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ new_policy_num = sub_policy_num;
+ for (j = 0; j < sub_policy_num; j++) {
+ sub_policy =
+ mtr_policy->sub_policys[domain][j];
+ if (sub_policy) {
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ if (sub_policy !=
+ mtr_policy->sub_policys[domain][0]) {
+ mtr_policy->sub_policys[domain][j] =
+ NULL;
+ mlx5_ipool_free
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ sub_policy->idx);
+ new_policy_num--;
+ }
+ }
+ }
+ if (new_policy_num != sub_policy_num) {
+ mtr_policy->sub_policy_num &=
+ ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+ mtr_policy->sub_policy_num |=
+ (new_policy_num &
+ MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+ }
+ break;
+ case MLX5_FLOW_FATE_QUEUE:
+ sub_policy = mtr_policy->sub_policys[domain][0];
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ break;
+ default:
+ /* Other actions do not use a queue; do nothing. */
+ break;
+ }
+ }
+ rte_spinlock_unlock(&mtr_policy->sl);
+}
+
/**
* Validate the batch counter support in root table.
*
.size = sizeof(value.buf),
};
struct mlx5dv_flow_matcher_attr dv_attr = {
- .type = IBV_FLOW_ATTR_NORMAL,
+ .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
.priority = 0,
.match_criteria_enable = 0,
.match_mask = (void *)&mask,
void *flow = NULL;
int ret = -1;
- tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
+ tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
0, 0, 0, NULL);
if (!tbl)
goto err;
&actions[0]);
if (ret)
goto err;
- actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
- priv->drop_queue.hrxq->action;
dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
&matcher);
if (ret)
goto err;
- ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
+ ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
actions, &flow);
err:
/*
NULL,
"Mix shared and indirect counter is not supported");
return flow_dv_validate_action_count(dev, true, 0, err);
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ if (!priv->sh->ct_aso_en)
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "ASO CT is not supported");
+ return mlx5_validate_action_ct(dev, action->conf, err);
default:
return rte_flow_error_set(err, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
.create_def_policy = flow_dv_create_def_policy,
.destroy_def_policy = flow_dv_destroy_def_policy,
.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
+ .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
.counter_alloc = flow_dv_counter_allocate,
.counter_free = flow_dv_counter_free,
.counter_query = flow_dv_counter_query,