if (conf->dst == REG_C_0) {
/* Copy to reg_c[0], within mask only. */
reg_dst.offset = rte_bsf32(reg_c0);
- /*
-	 * Mask is ignoring the endianness, because
- * there is no conversion in datapath.
- */
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- /* Copy from destination lower bits to reg_c[0]. */
- mask = reg_c0 >> reg_dst.offset;
-#else
- /* Copy from destination upper bits to reg_c[0]. */
- mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
- rte_fls_u32(reg_c0));
-#endif
+ mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
} else {
- mask = rte_cpu_to_be_32(reg_c0);
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- /* Copy from reg_c[0] to destination lower bits. */
reg_dst.offset = 0;
-#else
- /* Copy from reg_c[0] to destination upper bits. */
- reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
- (rte_fls_u32(reg_c0) -
- rte_bsf32(reg_c0));
-#endif
+ mask = rte_cpu_to_be_32(reg_c0);
}
}
return flow_dv_convert_modify_action(&item,
}
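
A minimal standalone sketch of the unified mask arithmetic above, assuming a hypothetical reg_c[0] mask of 0x00ffff00 in place of priv->sh->dv_regc0_mask; rte_bsf32() (from rte_common.h) returns the index of the least significant set bit, which is why a single big-endian conversion can replace the old per-endianness branches:

#include <stdint.h>
#include <stdio.h>
#include <rte_byteorder.h>
#include <rte_common.h>

int
main(void)
{
	uint32_t reg_c0 = 0x00ffff00; /* hypothetical dv_regc0_mask */
	uint32_t offset = rte_bsf32(reg_c0); /* lowest set bit: 8 */
	rte_be32_t mask = rte_cpu_to_be_32(reg_c0 >> offset);

	/* Prints: offset=8 mask=0x0000ffff */
	printf("offset=%u mask=0x%08x\n", offset, rte_be_to_cpu_32(mask));
	return 0;
}
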
static int
-mlx5_flow_item_field_width(struct mlx5_dev_config *config,
- enum rte_flow_field_id field)
+mlx5_flow_item_field_width(struct mlx5_priv *priv,
+ enum rte_flow_field_id field, int inherit)
{
switch (field) {
case RTE_FLOW_FIELD_START:
case RTE_FLOW_FIELD_TAG:
return 32;
case RTE_FLOW_FIELD_MARK:
- return 24;
+ return __builtin_popcount(priv->sh->dv_mark_mask);
case RTE_FLOW_FIELD_META:
- if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
- return 16;
- else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
- return 32;
- else
- return 0;
+ return __builtin_popcount(priv->sh->dv_meta_mask);
case RTE_FLOW_FIELD_POINTER:
case RTE_FLOW_FIELD_VALUE:
- return 64;
+ return inherit < 0 ? 0 : inherit;
default:
MLX5_ASSERT(false);
}
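
The rewritten helper derives the MARK/META widths from the masks the driver negotiated at startup instead of hard-coding them per xmeta mode; a small sketch of that derivation, with hypothetical values standing in for priv->sh->dv_mark_mask and dv_meta_mask:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t dv_mark_mask = 0x00ffffff; /* hypothetical: 24 usable bits */
	uint32_t dv_meta_mask = 0xffff0000; /* hypothetical: 16 usable bits */

	/* The field width is simply the population count of the mask. */
	printf("MARK=%d META=%d\n",
	       __builtin_popcount(dv_mark_mask),
	       __builtin_popcount(dv_meta_mask)); /* MARK=24 META=16 */
	return 0;
}
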
static void
mlx5_flow_field_id_to_modify_info
(const struct rte_flow_action_modify_data *data,
- struct field_modify_info *info,
- uint32_t *mask, uint32_t *value,
- uint32_t width, uint32_t dst_width,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
+ struct field_modify_info *info, uint32_t *mask,
+ uint32_t width, uint32_t *shift, struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
uint32_t idx = 0;
uint32_t off = 0;
- uint64_t val = 0;
+
switch (data->field) {
case RTE_FLOW_FIELD_START:
/* not supported yet */
off = data->offset > 16 ? data->offset - 16 : 0;
if (mask) {
if (data->offset < 16) {
- info[idx] = (struct field_modify_info){2, 0,
+ info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_DMAC_15_0};
if (width < 16) {
mask[idx] = rte_cpu_to_be_16(0xffff >>
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 4 * idx,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
mask[idx] = rte_cpu_to_be_32((0xffffffff >>
(32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 0,
+ info[idx++] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_DMAC_15_0};
- info[idx] = (struct field_modify_info){4, off,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
}
break;
off = data->offset > 16 ? data->offset - 16 : 0;
if (mask) {
if (data->offset < 16) {
- info[idx] = (struct field_modify_info){2, 0,
+ info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_SMAC_15_0};
if (width < 16) {
mask[idx] = rte_cpu_to_be_16(0xffff >>
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 4 * idx,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
mask[idx] = rte_cpu_to_be_32((0xffffffff >>
(32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 0,
+ info[idx++] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_SMAC_15_0};
- info[idx] = (struct field_modify_info){4, off,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
}
break;
case RTE_FLOW_FIELD_IPV6_SRC:
if (mask) {
if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_SIPV6_31_0};
if (width < 32) {
mask[idx] =
++idx;
}
if (data->offset < 64) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_SIPV6_63_32};
if (width < 32) {
mask[idx] =
++idx;
}
if (data->offset < 96) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_SIPV6_95_64};
if (width < 32) {
mask[idx] =
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 4 * idx,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_127_96};
mask[idx] = rte_cpu_to_be_32(0xffffffff >>
(32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx++] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_SIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx++] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_SIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx++] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_SIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
case RTE_FLOW_FIELD_IPV6_DST:
if (mask) {
if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_DIPV6_31_0};
if (width < 32) {
mask[idx] =
++idx;
}
if (data->offset < 64) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_DIPV6_63_32};
if (width < 32) {
mask[idx] =
++idx;
}
if (data->offset < 96) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_DIPV6_95_64};
if (width < 32) {
mask[idx] =
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 4 * idx,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_127_96};
mask[idx] = rte_cpu_to_be_32(0xffffffff >>
(32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx++] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_DIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx++] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_DIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 0,
+ info[idx++] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_DIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
break;
case RTE_FLOW_FIELD_MARK:
{
+ uint32_t mark_mask = priv->sh->dv_mark_mask;
+ uint32_t mark_count = __builtin_popcount(mark_mask);
int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
0, error);
if (reg < 0)
info[idx] = (struct field_modify_info){4, 0,
reg_to_field[reg]};
if (mask)
- mask[idx] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[idx] = rte_cpu_to_be_32((mark_mask >>
+ (mark_count - width)) & mark_mask);
}
break;
case RTE_FLOW_FIELD_META:
{
- unsigned int xmeta = config->dv_xmeta_en;
+ uint32_t meta_mask = priv->sh->dv_meta_mask;
+ uint32_t meta_count = __builtin_popcount(meta_mask);
+ uint32_t msk_c0 =
+ rte_cpu_to_be_32(priv->sh->dv_regc0_mask);
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
int reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return;
MLX5_ASSERT(reg != REG_NON);
MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
- if (xmeta == MLX5_XMETA_MODE_META16) {
- info[idx] = (struct field_modify_info){2, 0,
- reg_to_field[reg]};
- if (mask)
- mask[idx] = rte_cpu_to_be_16(0xffff >>
- (16 - width));
- } else if (xmeta == MLX5_XMETA_MODE_META32) {
- info[idx] = (struct field_modify_info){4, 0,
- reg_to_field[reg]};
- if (mask)
- mask[idx] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
- } else {
- MLX5_ASSERT(false);
- }
+ if (reg == REG_C_0)
+ *shift = shl_c0;
+ info[idx] = (struct field_modify_info){4, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32((meta_mask >>
+ (meta_count - width)) & meta_mask);
}
break;
case RTE_FLOW_FIELD_POINTER:
case RTE_FLOW_FIELD_VALUE:
- if (data->field == RTE_FLOW_FIELD_POINTER)
- memcpy(&val, (void *)(uintptr_t)data->value,
- sizeof(uint64_t));
- else
- val = data->value;
- for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
- if (mask[idx]) {
- if (dst_width == 48) {
- /*special case for MAC addresses */
- value[idx] = rte_cpu_to_be_16(val);
- val >>= 16;
- dst_width -= 16;
- } else if (dst_width > 16) {
- value[idx] = rte_cpu_to_be_32(val);
- val >>= 32;
- } else if (dst_width > 8) {
- value[idx] = rte_cpu_to_be_16(val);
- val >>= 16;
- } else {
- value[idx] = (uint8_t)val;
- val >>= 8;
- }
- if (!val)
- break;
- }
- }
- break;
default:
MLX5_ASSERT(false);
break;
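
In the MARK and META cases above, the modify-header mask now keeps only the `width` most significant usable bits of the configured mask before the byte swap; a hedged sketch of that computation under the same hypothetical 24-bit mark mask:

#include <stdint.h>
#include <stdio.h>
#include <rte_byteorder.h>

int
main(void)
{
	uint32_t mark_mask = 0x00ffffff; /* hypothetical dv_mark_mask */
	uint32_t mark_count = __builtin_popcount(mark_mask); /* 24 */
	uint32_t width = 16; /* bits requested by the modify action */
	rte_be32_t mask = rte_cpu_to_be_32((mark_mask >>
					    (mark_count - width)) & mark_mask);

	/* Prints: mask=0x0000ffff */
	printf("mask=0x%08x\n", rte_be_to_cpu_32(mask));
	return 0;
}
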
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
const struct rte_flow_action_modify_field *conf =
(const struct rte_flow_action_modify_field *)(action->conf);
- struct rte_flow_item item;
+ struct rte_flow_item item = {
+ .spec = NULL,
+ .mask = NULL
+ };
struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
{0, 0, 0} };
struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
{0, 0, 0} };
uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
- uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
uint32_t type;
- uint32_t dst_width = mlx5_flow_item_field_width(config,
- conf->dst.field);
+ uint32_t shift = 0;
if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
- conf->src.field == RTE_FLOW_FIELD_VALUE) {
+ conf->src.field == RTE_FLOW_FIELD_VALUE) {
type = MLX5_MODIFICATION_TYPE_SET;
/** For SET fill the destination field (field) first. */
mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
- value, conf->width, dst_width, dev, attr, error);
- /** Then copy immediate value from source as per mask. */
- mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
- value, conf->width, dst_width, dev, attr, error);
- item.spec = &value;
+ conf->width, &shift, dev,
+ attr, error);
+ item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
+ (void *)(uintptr_t)conf->src.pvalue :
+ (void *)(uintptr_t)&conf->src.value;
} else {
type = MLX5_MODIFICATION_TYPE_COPY;
/** For COPY fill the destination field (dcopy) without mask. */
mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
- value, conf->width, dst_width, dev, attr, error);
+ conf->width, &shift, dev,
+ attr, error);
/** Then construct the source field (field) with mask. */
mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
- value, conf->width, dst_width, dev, attr, error);
+ conf->width, &shift,
+ dev, attr, error);
}
item.mask = &mask;
return flow_dv_convert_modify_action(&item,
{
const struct rte_flow_item_gtp *gtp_spec;
const struct rte_flow_item_gtp *gtp_mask;
- const struct rte_flow_item_gtp_psc *spec;
const struct rte_flow_item_gtp_psc *mask;
const struct rte_flow_item_gtp_psc nic_mask = {
- .pdu_type = 0xFF,
- .qfi = 0xFF,
+ .hdr.type = 0xF,
+ .hdr.qfi = 0x3F,
};
if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
/* GTP spec is here and E flag is requested to match zero. */
if (!item->spec)
return 0;
- spec = item->spec;
mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
- if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
- "PDU type should be smaller than 16");
return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_gtp_psc),
struct rte_flow_error *error)
{
const struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ bool direction_error = false;
- (void)action;
- (void)attr;
if (!priv->sh->pop_vlan_action)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"pop vlan action is not supported");
- if (attr->egress)
+ /* Pop VLAN is not supported in egress except for CX6 FDB mode. */
+ if (attr->transfer) {
+ bool fdb_tx = priv->representor_id != UINT16_MAX;
+ bool is_cx5 = sh->steering_format_version ==
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
+
+ if (fdb_tx && is_cx5)
+ direction_error = true;
+ } else if (attr->egress) {
+ direction_error = true;
+ }
+ if (direction_error)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
NULL,
- "pop vlan action not supported for "
- "egress");
+ "pop vlan action not supported for egress");
if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
{
const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
const struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ bool direction_error = false;
if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, port_id should "
"be after push VLAN");
+ /* Push VLAN is not supported in ingress except for CX6 FDB mode. */
+ if (attr->transfer) {
+ bool fdb_tx = priv->representor_id != UINT16_MAX;
+ bool is_cx5 = sh->steering_format_version ==
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
+
+ if (!fdb_tx && is_cx5)
+ direction_error = true;
+ } else if (attr->ingress) {
+ direction_error = true;
+ }
+ if (direction_error)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "push vlan action not supported for ingress");
if (!attr->transfer && priv->representor)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
return 0;
}
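
The pop and push checks above are mirror images of each other; a hypothetical distillation (not driver code) of the two direction gates, where fdb_tx reflects priv->representor_id != UINT16_MAX and is_cx5 reflects the ConnectX-5 steering format:

#include <stdbool.h>

/* Pop VLAN: invalid on egress; in FDB Tx it is invalid only on CX5. */
static bool
pop_vlan_dir_error(bool transfer, bool egress, bool fdb_tx, bool is_cx5)
{
	return transfer ? (fdb_tx && is_cx5) : egress;
}

/* Push VLAN: invalid on ingress; in FDB Rx it is invalid only on CX5. */
static bool
push_vlan_dir_error(bool transfer, bool ingress, bool fdb_tx, bool is_cx5)
{
	return transfer ? (!fdb_tx && is_cx5) : ingress;
}
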
-/**
- * Check if action counter is shared by either old or new mechanism.
- *
- * @param[in] action
- * Pointer to the action structure.
- *
- * @return
- * True when counter is shared, false otherwise.
- */
-static inline bool
-is_shared_action_count(const struct rte_flow_action *action)
-{
- const struct rte_flow_action_count *count =
- (const struct rte_flow_action_count *)action->conf;
-
- if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
- return true;
- return !!(count && count->shared);
-}
-
/**
* Validate count action.
*
struct mlx5_dev_config *config = &priv->config;
const struct rte_flow_action_modify_field *action_modify_field =
action->conf;
- uint32_t dst_width = mlx5_flow_item_field_width(config,
- action_modify_field->dst.field);
- uint32_t src_width = mlx5_flow_item_field_width(config,
- action_modify_field->src.field);
+ uint32_t dst_width = mlx5_flow_item_field_width(priv,
+ action_modify_field->dst.field, -1);
+ uint32_t src_width = mlx5_flow_item_field_width(priv,
+ action_modify_field->src.field, dst_width);
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (ret)
"source and destination fields"
" cannot be the same");
if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
- action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
+ action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
+ action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "immediate value or a pointer to it"
+ "mark, immediate value or a pointer to it"
" cannot be used as a destination");
if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
action_modify_field->src.field == RTE_FLOW_FIELD_START)
}
/*
- * Validate the port_id action.
+ * Validate action PORT_ID / REPRESENTED_PORT.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in] action_flags
* Bit-fields that holds the actions detected until now.
* @param[in] action
- * Port_id RTE action structure.
+ * PORT_ID / REPRESENTED_PORT action structure.
* @param[in] attr
* Attributes of flow that includes this action.
* @param[out] error
struct rte_flow_error *error)
{
const struct rte_flow_action_port_id *port_id;
+ const struct rte_flow_action_ethdev *ethdev;
struct mlx5_priv *act_priv;
struct mlx5_priv *dev_priv;
uint16_t port;
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
- "port id action is valid in transfer"
+ "port action is valid in transfer"
" mode only");
if (!action || !action->conf)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL,
- "port id action parameters must be"
+ "port action parameters must be"
" specified");
if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
MLX5_FLOW_FATE_ESWITCH_ACTIONS))
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"failed to obtain E-Switch info");
- port_id = action->conf;
- port = port_id->original ? dev->data->port_id : port_id->id;
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ port_id = action->conf;
+ port = port_id->original ? dev->data->port_id : port_id->id;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ ethdev = action->conf;
+ port = ethdev->port_id;
+ break;
+ default:
+ MLX5_ASSERT(false);
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "unknown E-Switch action");
+ }
act_priv = mlx5_port_to_eswitch_info(port, false);
if (!act_priv)
return rte_flow_error_set
(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
"failed to obtain E-Switch port id for port");
if (act_priv->domain_id != dev_priv->domain_id)
return rte_flow_error_set
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_validate_action_count
- (dev, is_shared_action_count(act),
- *action_flags | sub_action_flags,
+ (dev, false, *action_flags | sub_action_flags,
error);
if (ret < 0)
return ret;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
ret = flow_dv_validate_action_port_id(dev,
sub_action_flags,
act,
return 0;
}
-/**
- * Allocate a shared flow counter.
- *
- * @param[in] ctx
- * Pointer to the shared counter configuration.
- * @param[in] data
- * Pointer to save the allocated counter index.
- *
- * @return
- * Index to flow counter on success, 0 otherwise and rte_errno is set.
- */
-
-static int32_t
-flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
-{
- struct mlx5_shared_counter_conf *conf = ctx;
- struct rte_eth_dev *dev = conf->dev;
- struct mlx5_flow_counter *cnt;
-
- data->dword = flow_dv_counter_alloc(dev, 0);
- data->dword |= MLX5_CNT_SHARED_OFFSET;
- cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
- cnt->shared_info.id = conf->id;
- return 0;
-}
-
-/**
- * Get a shared flow counter.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] id
- * Counter identifier.
- *
- * @return
- * Index to flow counter on success, 0 otherwise and rte_errno is set.
- */
-static uint32_t
-flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_shared_counter_conf conf = {
- .dev = dev,
- .id = id,
- };
- union mlx5_l3t_data data = {
- .dword = 0,
- };
-
- mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
- flow_dv_counter_alloc_shared_cb, &conf);
- return data.dword;
-}
-
/**
* Get age param from counter index.
*
if (pool->is_aged) {
flow_dv_counter_remove_from_age(dev, counter, cnt);
} else {
- /*
- * If the counter action is shared by ID, the l3t_clear_entry
- * function reduces its references counter. If after the
- * reduction the action is still referenced, the function
- * returns here and does not release it.
- */
- if (IS_LEGACY_SHARED_CNT(counter) &&
- mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
- cnt->shared_info.id))
- return;
/*
* If the counter action is shared by indirect action API,
* the atomic function reduces its references counter.
* If after the reduction the action is still referenced, the
* function returns here and does not release it.
- * When the counter action is not shared neither by ID nor by
+		 * When the counter action is not shared by the
* indirect action API, shared info is 1 before the reduction,
* so this condition is failed and function doesn't return here.
*/
- if (!IS_LEGACY_SHARED_CNT(counter) &&
- __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
+ if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
__ATOMIC_RELAXED))
return;
}
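
With the legacy shared-counter path gone, the release logic rests on one idiom: __atomic_sub_fetch() returns the post-decrement value, so a non-zero result means other holders remain. A minimal sketch of that idiom (ref_put_last is a hypothetical helper):

#include <stdbool.h>
#include <stdint.h>

static bool
ref_put_last(uint32_t *refcnt)
{
	/* True only for the caller that dropped the last reference. */
	return __atomic_sub_fetch(refcnt, 1, __ATOMIC_RELAXED) == 0;
}
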
const struct rte_flow_item *rule_items = items;
const struct rte_flow_item *port_id_item = NULL;
bool def_policy = false;
+ uint16_t udp_dport = 0;
if (items == NULL)
return -1;
ret = mlx5_flow_validate_item_udp(items, item_flags,
next_protocol,
error);
+ const struct rte_flow_item_udp *spec = items->spec;
+ const struct rte_flow_item_udp *mask = items->mask;
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ if (spec != NULL)
+ udp_dport = rte_be_to_cpu_16
+ (spec->hdr.dst_port &
+ mask->hdr.dst_port);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- ret = mlx5_flow_validate_item_vxlan(dev, items,
- item_flags, attr,
- error);
+ ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
+ items, item_flags,
+ attr, error);
if (ret < 0)
return ret;
last_item = MLX5_FLOW_LAYER_VXLAN;
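
The loop above now latches the masked outer UDP destination port so the later VXLAN check can tell whether the rule really targets the VXLAN port on ConnectX-5; a standalone sketch of the masking with illustrative values:

#include <stdint.h>
#include <stdio.h>
#include <rte_byteorder.h>

int
main(void)
{
	rte_be16_t spec_dport = RTE_BE16(4789);   /* IANA VXLAN port */
	rte_be16_t mask_dport = RTE_BE16(0xffff); /* full match */
	uint16_t udp_dport = rte_be_to_cpu_16(spec_dport & mask_dport);

	printf("udp_dport=%u\n", udp_dport); /* 4789 */
	return 0;
}
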
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
ret = flow_dv_validate_action_port_id(dev,
action_flags,
actions,
++actions_n;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
+ shared_count = true;
+ /* fall-through. */
case RTE_FLOW_ACTION_TYPE_COUNT:
- shared_count = is_shared_action_count(actions);
ret = flow_dv_validate_action_count(dev, shared_count,
action_flags,
error);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
}
+ dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
if (!vxlan_v)
return;
if (!vxlan_m) {
else
vxlan_m = &nic_mask;
}
- if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
+ if ((priv->sh->steering_format_version ==
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
+ dport != MLX5_UDP_PORT_VXLAN) ||
+ (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
void *misc_m;
void *misc_v;
MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
geneve_opt_v->option_len + 1);
}
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
/* Set the data. */
if (geneve_opt_v->data) {
memcpy(&opt_data_key, geneve_opt_v->data,
if (!gtp_psc_m)
gtp_psc_m = &rte_flow_item_gtp_psc_mask;
dw_0.w32 = 0;
- dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
- dw_0.qfi = gtp_psc_m->qfi;
+ dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
+ dw_0.qfi = gtp_psc_m->hdr.qfi;
MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
rte_cpu_to_be_32(dw_0.w32));
dw_0.w32 = 0;
- dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
- gtp_psc_m->pdu_type);
- dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
+ dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
+ gtp_psc_m->hdr.type);
+ dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
rte_cpu_to_be_32(dw_0.w32));
}
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] samples
- * Sample IDs to be used in the matching.
+ * @param[in] last_item
+ * Last item flags.
*/
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
- void *key, const struct rte_flow_item *item)
+ void *key, const struct rte_flow_item *item,
+ uint64_t last_item)
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_item_ecpri *ecpri_m = item->mask;
void *dw_m;
void *dw_v;
+ /*
+ * In case of eCPRI over Ethernet, if EtherType is not specified,
+ * match on eCPRI EtherType implicitly.
+ */
+ if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
+ void *hdrs_m, *hdrs_v, *l2m, *l2v;
+
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
+ l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
+ if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
+ *(uint16_t *)l2m = UINT16_MAX;
+ *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
+ }
+ }
if (!ecpri_v)
return;
if (!ecpri_m)
}
/**
- * Translate port ID action to vport.
+ * Translate action PORT_ID / REPRESENTED_PORT to vport.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in] action
- * Pointer to the port ID action.
+ * Pointer to action PORT_ID / REPRESENTED_PORT.
* @param[out] dst_port_id
* The target port ID.
* @param[out] error
{
uint32_t port;
struct mlx5_priv *priv;
- const struct rte_flow_action_port_id *conf =
- (const struct rte_flow_action_port_id *)action->conf;
- port = conf->original ? dev->data->port_id : conf->id;
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_PORT_ID: {
+ const struct rte_flow_action_port_id *conf;
+
+ conf = (const struct rte_flow_action_port_id *)action->conf;
+ port = conf->original ? dev->data->port_id : conf->id;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
+ const struct rte_flow_action_ethdev *ethdev;
+
+ ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+ port = ethdev->port_id;
+ break;
+ }
+ default:
+ MLX5_ASSERT(false);
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "unknown E-Switch action");
+ }
+
priv = mlx5_port_to_eswitch_info(port, false);
if (!priv)
return rte_flow_error_set(error, -rte_errno,
static uint32_t
flow_dv_translate_create_counter(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
- const struct rte_flow_action_count *count,
+ const struct rte_flow_action_count *count
+ __rte_unused,
const struct rte_flow_action_age *age)
{
uint32_t counter;
struct mlx5_age_param *age_param;
- if (count && count->shared)
- counter = flow_dv_counter_get_shared(dev, count->id);
- else
- counter = flow_dv_counter_alloc(dev, !!age);
+ counter = flow_dv_counter_alloc(dev, !!age);
if (!counter || age == NULL)
return counter;
age_param = flow_dv_counter_idx_get_age(dev, counter);
dev_flow->hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (rss_desc->level >= 2) {
- dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+ if (rss_desc->level >= 2)
rss_inner = 1;
- }
#endif
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
(!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
}
}
+ if (dev_flow->hash_fields == 0)
+ /*
+ * There is no match between the RSS types and the
+ * L3 protocol (IPv4/IPv6) defined in the flow rule.
+ */
+ return;
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
(!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
if (rss_types & ETH_RSS_UDP) {
dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
}
}
+ if (rss_inner)
+ dev_flow->hash_fields |= IBV_RX_HASH_INNER;
}
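
The rework above defers the inner-RSS flag until a hash field was actually selected, so an empty selection stays zero and the caller can fall back to a single queue; a hedged sketch of that shape, with HASH_INNER as a stand-in for IBV_RX_HASH_INNER:

#include <stdint.h>

#define HASH_INNER (1ULL << 31) /* stand-in for IBV_RX_HASH_INNER */

static uint64_t
build_hash_fields(uint64_t l3_l4_fields, int rss_inner)
{
	uint64_t hash = l3_l4_fields;

	if (hash == 0)
		return 0; /* no L3/L4 match: caller uses a single queue */
	if (rss_inner)
		hash |= HASH_INNER;
	return hash;
}
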
/**
rss_desc->hash_fields = dev_flow->hash_fields;
rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
rss_desc->shared_rss = 0;
+ if (rss_desc->hash_fields == 0)
+ rss_desc->queue_num = 1;
*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
if (!*hrxq_idx)
return NULL;
break;
}
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
{
struct mlx5_flow_dv_port_id_action_resource
port_id_resource;
}
static inline int
-flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
+flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
+ struct rte_flow_error *error)
{
uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
struct rte_eth_dev *owndev = &rte_eth_devices[owner];
- RTE_SET_USED(dev);
+ int ret;
MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
if (dev->data->dev_started != 1)
- return -1;
- return flow_dv_aso_ct_dev_release(owndev, idx);
+ return rte_flow_error_set(error, EAGAIN,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Indirect CT action cannot be destroyed when the port is stopped");
+ ret = flow_dv_aso_ct_dev_release(owndev, idx);
+ if (ret < 0)
+ return rte_flow_error_set(error, EAGAIN,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Current state prevents indirect CT action from being destroyed");
+ return ret;
}
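
The release path now reports failures through rte_flow_error_set(), which records the details in *error, sets rte_errno, and returns the negated code, so it can be returned in one statement; a minimal sketch with an illustrative message:

#include <errno.h>
#include <rte_flow.h>

static int
report_stopped_port(struct rte_flow_error *error)
{
	/* Fills *error and returns -EAGAIN in a single statement. */
	return rte_flow_error_set(error, EAGAIN,
				  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				  "port must be started first");
}
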
/*
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
if (flow_dv_translate_action_port_id(dev, action,
&port_id, error))
return -rte_errno;
MLX5_FLOW_FATE_QUEUE;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
- flow->age = (uint32_t)(uintptr_t)(action->conf);
- age_act = flow_aso_age_get_by_idx(dev, flow->age);
- __atomic_fetch_add(&age_act->refcnt, 1,
- __ATOMIC_RELAXED);
+ owner_idx = (uint32_t)(uintptr_t)action->conf;
+ age_act = flow_aso_age_get_by_idx(dev, owner_idx);
+ if (flow->age == 0) {
+ flow->age = owner_idx;
+ __atomic_fetch_add(&age_act->refcnt, 1,
+ __ATOMIC_RELAXED);
+ }
age_act_pos = actions_n++;
action_flags |= MLX5_FLOW_ACTION_AGE;
break;
action_flags |= MLX5_FLOW_ACTION_AGE;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
- flow->counter = (uint32_t)(uintptr_t)(action->conf);
- cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
+ owner_idx = (uint32_t)(uintptr_t)action->conf;
+ cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
NULL);
- __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
- __ATOMIC_RELAXED);
- /* Save information first, will apply later. */
- action_flags |= MLX5_FLOW_ACTION_COUNT;
+ MLX5_ASSERT(cnt_act != NULL);
+ /**
+			 * When creating a meter drop flow in the drop table,
+			 * the counter should not overwrite the rte flow
+			 * counter.
+ */
+ if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
+ dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
+ dev_flow->dv.actions[actions_n++] =
+ cnt_act->action;
+ } else {
+ if (flow->counter == 0) {
+ flow->counter = owner_idx;
+ __atomic_fetch_add
+ (&cnt_act->shared_info.refcnt,
+ 1, __ATOMIC_RELAXED);
+ }
+ /* Save information first, will apply later. */
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ }
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
if (!dev_conf->devx) {
else
dev_flow->dv.actions[actions_n] =
ct->dr_action_rply;
- flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
- flow->ct = owner_idx;
- __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ if (flow->ct == 0) {
+ flow->indirect_type =
+ MLX5_INDIRECT_ACTION_TYPE_CT;
+ flow->ct = owner_idx;
+ __atomic_fetch_add(&ct->refcnt, 1,
+ __ATOMIC_RELAXED);
+ }
actions_n++;
action_flags |= MLX5_FLOW_ACTION_CT;
break;
* when they are not shared.
*/
if (action_flags & MLX5_FLOW_ACTION_AGE) {
- if ((non_shared_age &&
- count && !count->shared) ||
+ if ((non_shared_age && count) ||
!(priv->sh->flow_hit_aso_en &&
(attr->group || attr->transfer))) {
/* Creates age by counters. */
"cannot create eCPRI parser");
}
flow_dv_translate_item_ecpri(dev, match_mask,
- match_value, items);
+ match_value, items,
+ last_item);
/* No other protocol should follow eCPRI layer. */
last_item = MLX5_FLOW_LAYER_ECPRI;
break;
#ifdef HAVE_MLX5DV_DR
/* DR supports drop action placeholder. */
MLX5_ASSERT(priv->sh->dr_drop_action);
- dv->actions[n++] = priv->sh->dr_drop_action;
+ dv->actions[n++] = dv->group ?
+ priv->sh->dr_drop_action :
+ priv->root_drop_action;
#else
/* For DV we use the explicit drop queue. */
MLX5_ASSERT(priv->drop_queue.hrxq);
}
/* Keep the current age handling by default. */
if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
- flow_dv_aso_ct_release(dev, flow->ct);
+ flow_dv_aso_ct_release(dev, flow->ct, NULL);
else if (flow->age)
flow_dv_aso_age_release(dev, flow->age);
if (flow->geneve_tlv_option) {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"invalid shared action");
- remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
- if (remaining)
- return rte_flow_error_set(error, EBUSY,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "shared rss hrxq has references");
if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
0, 0, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED))
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss has references");
+ remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+ if (remaining)
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss hrxq has references");
queue = shared_rss->ind_tbl->queues;
remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
if (remaining)
" released with references %d.", idx, ret);
return 0;
case MLX5_INDIRECT_ACTION_TYPE_CT:
- ret = flow_dv_aso_ct_release(dev, idx);
+ ret = flow_dv_aso_ct_release(dev, idx, error);
if (ret < 0)
return ret;
if (ret > 0)
policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
next_fm = mlx5_flow_meter_find(priv,
policy->act_cnt[i].next_mtr_id, NULL);
- TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
+ RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
next_port, tmp) {
claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
tbl = container_of(color_rule->matcher->tbl,
break;
}
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
{
struct mlx5_flow_dv_port_id_action_resource
port_id_resource;
}
rte_spinlock_unlock(&mtr_policy->sl);
}
+/**
+ * Check whether the DR drop action is supported on the root table.
+ *
+ * Create a simple flow with the DR drop action on the root table to
+ * validate whether it is supported there.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_match_params mask = {
+ .size = sizeof(mask.buf),
+ };
+ struct mlx5_flow_dv_match_params value = {
+ .size = sizeof(value.buf),
+ };
+ struct mlx5dv_flow_matcher_attr dv_attr = {
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .priority = 0,
+ .match_criteria_enable = 0,
+ .match_mask = (void *)&mask,
+ };
+ struct mlx5_flow_tbl_resource *tbl = NULL;
+ void *matcher = NULL;
+ void *flow = NULL;
+ int ret = -1;
+
+ tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
+ 0, 0, 0, NULL);
+ if (!tbl)
+ goto err;
+ dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
+ __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &matcher);
+ if (ret)
+ goto err;
+ __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
+ ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
+ &sh->dr_drop_action, &flow);
+err:
+ /*
+	 * If the DR drop action is not supported on the root table, flow
+	 * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
+ */
+ if (!flow) {
+ if (matcher &&
+ (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
+ DRV_LOG(INFO, "DR drop action is not supported in root table.");
+ else
+ DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
+ ret = -1;
+ } else {
+ claim_zero(mlx5_flow_os_destroy_flow(flow));
+ }
+ if (matcher)
+ claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
+ if (tbl)
+ flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
+ return ret;
+}
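
The discovery routine above follows the usual errno-based capability probe: attempt the operation once and classify EOPNOTSUPP/EPROTONOSUPPORT as a clean "unsupported" verdict rather than a hard failure. A generic sketch of the pattern, with try_op as a hypothetical stand-in for the probe flow creation:

#include <errno.h>

static int
probe_capability(int (*try_op)(void))
{
	if (try_op() == 0)
		return 1; /* supported */
	if (errno == EOPNOTSUPP || errno == EPROTONOSUPPORT)
		return 0; /* cleanly unsupported */
	return -1; /* unexpected failure */
}
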
/**
* Validate the batch counter support in root table.
* @note: only stub for now
*/
static int
-flow_get_aged_flows(struct rte_eth_dev *dev,
+flow_dv_get_aged_flows(struct rte_eth_dev *dev,
void **context,
uint32_t nb_contexts,
struct rte_flow_error *error)
"Indirect age action not supported");
return flow_dv_validate_action_age(0, action, dev, err);
case RTE_FLOW_ACTION_TYPE_COUNT:
- /*
- * There are two mechanisms to share the action count.
- * The old mechanism uses the shared field to share, while the
- * new mechanism uses the indirect action API.
- * This validation comes to make sure that the two mechanisms
- * are not combined.
- */
- if (is_shared_action_count(action))
- return rte_flow_error_set(err, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "Mix shared and indirect counter is not supported");
return flow_dv_validate_action_count(dev, true, 0, err);
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
if (!priv->sh->ct_aso_en)
RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
NULL,
"Multiple fate actions not supported.");
+ *hierarchy_domain = 0;
while (true) {
fm = mlx5_flow_meter_find(priv, meter_id, NULL);
if (!fm)
"Non termination meter not supported in hierarchy.");
policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
MLX5_ASSERT(policy);
- if (!policy->is_hierarchy) {
+ /**
+		 * Only inherit the supported domains of the first meter in
+		 * the hierarchy.
+		 * Each meter supports at least one domain.
+ */
+ if (!*hierarchy_domain) {
if (policy->transfer)
*hierarchy_domain |=
MLX5_MTR_DOMAIN_TRANSFER_BIT;
MLX5_MTR_DOMAIN_INGRESS_BIT;
if (policy->egress)
*hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
+ }
+ if (!policy->is_hierarchy) {
*is_rss = policy->is_rss;
break;
}
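
After the change above, the hierarchy inherits the domains of its first meter, and each color's candidate domains are later narrowed with domain_color[i] &= hierarchy_domain; a toy illustration of that intersection with hypothetical domain bits:

#include <stdint.h>
#include <stdio.h>

#define DOM_INGRESS  (1u << 0) /* hypothetical bit values */
#define DOM_EGRESS   (1u << 1)
#define DOM_TRANSFER (1u << 2)

int
main(void)
{
	uint32_t hierarchy_domain = DOM_INGRESS | DOM_TRANSFER;
	uint32_t domain_color = DOM_INGRESS | DOM_EGRESS;

	domain_color &= hierarchy_domain; /* only common domains survive */
	printf("0x%x\n", domain_color);   /* prints 0x1 (ingress only) */
	return 0;
}
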
NULL, "too many actions");
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
if (!priv->config.dv_esw_en)
return -rte_mtr_error_set(error,
ENOTSUP,
return -rte_mtr_error_set(error,
ENOTSUP,
RTE_MTR_ERROR_TYPE_METER_POLICY,
- NULL, flow_err.message ?
- flow_err.message :
- "Meter hierarchy only supports GREEN color.");
+ NULL,
+ "Meter hierarchy only supports GREEN color.");
+ if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "No yellow policy should be provided in meter hierarchy.");
mtr = act->conf;
ret = flow_dv_validate_policy_mtr_hierarchy(dev,
mtr->mtr_id,
* so MARK action is only in ingress domain.
*/
domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
- else if (action_flags[i] &
- MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
- domain_color[i] = hierarchy_domain;
else
domain_color[i] = def_domain;
+ if (action_flags[i] &
+ MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
+ domain_color[i] &= hierarchy_domain;
/*
* Non-termination actions only support NIC Tx domain.
	 * The adjustment should be skipped when there is no
.counter_alloc = flow_dv_counter_allocate,
.counter_free = flow_dv_counter_free,
.counter_query = flow_dv_counter_query,
- .get_aged_flows = flow_get_aged_flows,
+ .get_aged_flows = flow_dv_get_aged_flows,
.action_validate = flow_dv_action_validate,
.action_create = flow_dv_action_create,
.action_destroy = flow_dv_action_destroy,