attr->valid = 1;
}
+/**
+ * Map an rte_mtr color to the corresponding mlx5 color value.
+ *
+ * @param[in] rcol
+ *   rte_mtr_color.
+ *
+ * @return
+ *   mlx5 color, MLX5_FLOW_COLOR_UNDEFINED for unrecognized input.
+ */
+static int
+rte_col_2_mlx5_col(enum rte_color rcol)
+{
+	if (rcol == RTE_COLOR_GREEN)
+		return MLX5_FLOW_COLOR_GREEN;
+	if (rcol == RTE_COLOR_YELLOW)
+		return MLX5_FLOW_COLOR_YELLOW;
+	if (rcol == RTE_COLOR_RED)
+		return MLX5_FLOW_COLOR_RED;
+	return MLX5_FLOW_COLOR_UNDEFINED;
+}
+
struct field_modify_info {
uint32_t size; /* Size of field in protocol header, in bytes. */
uint32_t offset; /* Offset of field in protocol header, in bytes. */
}
static enum mlx5_modification_field reg_to_field[] = {
+ [REG_NONE] = MLX5_MODI_OUT_NONE,
[REG_A] = MLX5_MODI_META_DATA_REG_A,
[REG_B] = MLX5_MODI_META_DATA_REG_B,
[REG_C_0] = MLX5_MODI_META_REG_C_0,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many items to modify");
+ assert(conf->id != REG_NONE);
+ assert(conf->id < RTE_DIM(reg_to_field));
actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
actions[i].field = reg_to_field[conf->id];
actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
- actions[i].data1 = conf->data;
+ actions[i].data1 = rte_cpu_to_be_32(conf->data);
++i;
resource->actions_num = i;
if (!resource->actions_num)
return 0;
}
+/**
+ * Convert SET_TAG action to DV specification.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in,out] resource
+ *   Pointer to the modify-header resource.
+ * @param[in] conf
+ *   Pointer to action specification.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_set_tag
+			(struct rte_eth_dev *dev,
+			 struct mlx5_flow_dv_modify_hdr_resource *resource,
+			 const struct rte_flow_action_set_tag *conf,
+			 struct rte_flow_error *error)
+{
+	/* Modify-header actions take big-endian data. */
+	rte_be32_t data = rte_cpu_to_be_32(conf->data);
+	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
+	struct rte_flow_item item = {
+		.spec = &data,
+		.mask = &mask,
+	};
+	struct field_modify_info reg_c_x[] = {
+		[1] = {0, 0, 0},
+	};
+	enum mlx5_modification_field reg_type;
+	int ret;
+
+	/* Resolve which metadata register backs tag index conf->index. */
+	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
+	if (ret < 0)
+		return ret;
+	assert(ret != REG_NONE);
+	assert((unsigned int)ret < RTE_DIM(reg_to_field));
+	reg_type = reg_to_field[ret];
+	assert(reg_type > 0);
+	/* Width is 4 bytes, offset 0 within the selected register. */
+	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
+	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
+					     MLX5_MODIFICATION_TYPE_SET, error);
+}
+
/**
* Convert internal COPY_REG action to DV specification.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev __rte_unused,
+flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
struct mlx5_flow_dv_modify_hdr_resource *res,
const struct rte_flow_action *action,
struct rte_flow_error *error)
{
const struct mlx5_flow_action_copy_mreg *conf = action->conf;
- uint32_t mask = RTE_BE32(UINT32_MAX);
+ rte_be32_t mask = RTE_BE32(UINT32_MAX);
struct rte_flow_item item = {
.spec = NULL,
.mask = &mask,
{0, 0, 0},
};
struct field_modify_info reg_dst = {
- .offset = (uint32_t)-1, /* Same as src. */
+ .offset = 0,
.id = reg_to_field[conf->dst],
};
+ /* Adjust reg_c[0] usage according to reported mask. */
+ if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t reg_c0 = priv->sh->dv_regc0_mask;
+
+ assert(reg_c0);
+ assert(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
+ if (conf->dst == REG_C_0) {
+ /* Copy to reg_c[0], within mask only. */
+ reg_dst.offset = rte_bsf32(reg_c0);
+			/*
+			 * The mask ignores the endianness, because
+			 * there is no conversion in the datapath.
+			 */
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ /* Copy from destination lower bits to reg_c[0]. */
+ mask = reg_c0 >> reg_dst.offset;
+#else
+ /* Copy from destination upper bits to reg_c[0]. */
+ mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
+ rte_fls_u32(reg_c0));
+#endif
+ } else {
+ mask = rte_cpu_to_be_32(reg_c0);
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ /* Copy from reg_c[0] to destination lower bits. */
+ reg_dst.offset = 0;
+#else
+ /* Copy from reg_c[0] to destination upper bits. */
+ reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
+ (rte_fls_u32(reg_c0) -
+ rte_bsf32(reg_c0));
+#endif
+ }
+ }
return flow_dv_convert_modify_action(&item,
reg_src, ®_dst, res,
MLX5_MODIFICATION_TYPE_COPY,
error);
}
+/**
+ * Convert MARK action to DV specification. This routine is used
+ * in extensive metadata only and requires metadata register to be
+ * handled. In legacy mode hardware tag resource is engaged.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] conf
+ *   Pointer to MARK action specification.
+ * @param[in,out] resource
+ *   Pointer to the modify-header resource.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_mark(struct rte_eth_dev *dev,
+			    const struct rte_flow_action_mark *conf,
+			    struct mlx5_flow_dv_modify_hdr_resource *resource,
+			    struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
+					   priv->sh->dv_mark_mask);
+	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
+	struct rte_flow_item item = {
+		.spec = &data,
+		.mask = &mask,
+	};
+	struct field_modify_info reg_c_x[] = {
+		{4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
+		{0, 0, 0},
+	};
+	/*
+	 * Use a plain int: mlx5_flow_get_reg_id() returns a negative errno
+	 * on failure, and an enum's signedness is implementation-defined,
+	 * which could defeat the "reg < 0" error check below.
+	 */
+	int reg;
+
+	if (!mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "zero mark action mask");
+	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+	if (reg < 0)
+		return reg;
+	assert(reg > 0);
+	reg_c_x[0].id = reg_to_field[reg];
+	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
+					     MLX5_MODIFICATION_TYPE_SET, error);
+}
+
+/**
+ * Get metadata register index for specified steering domain.
+ *
+ * Selects the FDB, TX or RX metadata register feature according to
+ * the flow attributes (transfer/egress/ingress).
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Attributes of flow to determine steering domain.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   positive index on success, a negative errno value otherwise
+ *   and rte_errno is set.
+ */
+static enum modify_reg
+flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
+			 const struct rte_flow_attr *attr,
+			 struct rte_flow_error *error)
+{
+	/*
+	 * Keep the intermediate result in an int: mlx5_flow_get_reg_id()
+	 * reports errors as negative errno values and an enum local may
+	 * not represent them (enum signedness is implementation-defined),
+	 * which would defeat the "reg < 0" check.
+	 */
+	int reg = mlx5_flow_get_reg_id(dev, attr->transfer ?
+				       MLX5_METADATA_FDB :
+				       attr->egress ?
+				       MLX5_METADATA_TX :
+				       MLX5_METADATA_RX, 0, error);
+
+	if (reg < 0)
+		return rte_flow_error_set(error,
+					  ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+					  NULL, "unavailable "
+					  "metadata register");
+	return reg;
+}
+
+/**
+ * Convert SET_META action to DV specification.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in,out] resource
+ *   Pointer to the modify-header resource.
+ * @param[in] attr
+ *   Attributes of flow that includes this item.
+ * @param[in] conf
+ *   Pointer to action specification.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_set_meta
+			(struct rte_eth_dev *dev,
+			 struct mlx5_flow_dv_modify_hdr_resource *resource,
+			 const struct rte_flow_attr *attr,
+			 const struct rte_flow_action_set_meta *conf,
+			 struct rte_flow_error *error)
+{
+	uint32_t data = conf->data;
+	uint32_t mask = conf->mask;
+	struct rte_flow_item item = {
+		.spec = &data,
+		.mask = &mask,
+	};
+	struct field_modify_info reg_c_x[] = {
+		[1] = {0, 0, 0},
+	};
+	/*
+	 * Use a plain int: flow_dv_get_metadata_reg() may report a negative
+	 * errno, and an enum's signedness is implementation-defined, which
+	 * could defeat the "reg < 0" error check below.
+	 */
+	int reg = flow_dv_get_metadata_reg(dev, attr, error);
+
+	if (reg < 0)
+		return reg;
+	/*
+	 * In datapath code there is no endianness
+	 * conversions for performance reasons, all
+	 * pattern conversions are done in rte_flow.
+	 */
+	if (reg == REG_C_0) {
+		struct mlx5_priv *priv = dev->data->dev_private;
+		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
+		uint32_t shl_c0;
+
+		assert(msk_c0);
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		shl_c0 = rte_bsf32(msk_c0);
+#else
+		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
+#endif
+		/* Shift value/mask into the portion of reg_c[0] we own. */
+		mask <<= shl_c0;
+		data <<= shl_c0;
+		assert(!(~msk_c0 & rte_cpu_to_be_32(mask)));
+	}
+	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
+	/* The routine expects parameters in memory as big-endian ones. */
+	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
+					     MLX5_MODIFICATION_TYPE_SET, error);
+}
+
+/**
+ * Validate MARK item.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] attr
+ *   Attributes of flow that includes this item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_mark(struct rte_eth_dev *dev,
+			   const struct rte_flow_item *item,
+			   const struct rte_flow_attr *attr __rte_unused,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
+	const struct rte_flow_item_mark *spec = item->spec;
+	const struct rte_flow_item_mark *mask = item->mask;
+	const struct rte_flow_item_mark nic_mask = {
+		.id = priv->sh->dv_mark_mask,
+	};
+	int ret;
+
+	/* Matching on MARK requires the extended metadata mode. */
+	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "extended metadata feature"
+					  " isn't enabled");
+	if (!mlx5_flow_ext_mreg_supported(dev))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "extended metadata register"
+					  " isn't supported");
+	if (!nic_mask.id)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "extended metadata register"
+					  " isn't available");
+	/* Check that a register is actually allocated for MARK. */
+	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+	if (ret < 0)
+		return ret;
+	if (!spec)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+					  item->spec,
+					  "data cannot be empty");
+	/* The id must fit into the mask reported by the driver. */
+	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &spec->id,
+					  "mark id exceeds the limit");
+	if (!mask)
+		mask = &nic_mask;
+	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+					(const uint8_t *)&nic_mask,
+					sizeof(struct rte_flow_item_mark),
+					error);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
/**
* Validate META item.
*
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
const struct rte_flow_item_meta *spec = item->spec;
const struct rte_flow_item_meta *mask = item->mask;
- const struct rte_flow_item_meta nic_mask = {
+ struct rte_flow_item_meta nic_mask = {
.data = UINT32_MAX
};
+ enum modify_reg reg;
int ret;
if (!spec)
"data cannot be empty");
if (!spec->data)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
- NULL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
"data cannot be zero");
+ if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ if (!mlx5_flow_ext_mreg_supported(dev))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "extended metadata register"
+ " isn't supported");
+ reg = flow_dv_get_metadata_reg(dev, attr, error);
+ if (reg < 0)
+ return reg;
+ if (reg == REG_B)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "match on reg_b "
+ "isn't supported");
+ if (reg != REG_A)
+ nic_mask.data = priv->sh->dv_meta_mask;
+ }
if (!mask)
mask = &rte_flow_item_meta_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_meta),
error);
+ return ret;
+}
+
+/**
+ * Validate TAG item.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] attr
+ *   Attributes of flow that includes this item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_tag(struct rte_eth_dev *dev,
+			  const struct rte_flow_item *item,
+			  const struct rte_flow_attr *attr __rte_unused,
+			  struct rte_flow_error *error)
+{
+	const struct rte_flow_item_tag *spec = item->spec;
+	const struct rte_flow_item_tag *mask = item->mask;
+	const struct rte_flow_item_tag nic_mask = {
+		.data = RTE_BE32(UINT32_MAX),
+		.index = 0xff,
+	};
+	int ret;
+
+	if (!mlx5_flow_ext_mreg_supported(dev))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "extensive metadata register"
+					  " isn't supported");
+	if (!spec)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+					  item->spec,
+					  "data cannot be empty");
+	if (!mask)
+		mask = &rte_flow_item_tag_mask;
+	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+					(const uint8_t *)&nic_mask,
+					sizeof(struct rte_flow_item_tag),
+					error);
 	if (ret < 0)
 		return ret;
-	if (attr->ingress)
-		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
-					  NULL,
-					  "pattern not supported for ingress");
+	/* The tag index must be fully specified (no partial index match). */
+	if (mask->index != 0xff)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+					  "partial mask for tag index"
+					  " is not supported");
+	/* Check that a register is available for the given tag index. */
+	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
+	if (ret < 0)
+		return ret;
+	assert(ret != REG_NONE);
 	return 0;
 }
return ret;
if (!spec)
return 0;
- esw_priv = mlx5_port_to_eswitch_info(spec->id);
+ esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
if (!esw_priv)
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
return 0;
}
-/**
- * Validate count action.
+/**
+ * Validate the FLAG action.
*
* @param[in] dev
- * device otr.
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] attr
+ * Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_count(struct rte_eth_dev *dev,
- struct rte_flow_error *error)
+flow_dv_validate_action_flag(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ int ret;
- if (!priv->config.devx)
- goto notsup_err;
-#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
+ /* Fall back if no extended metadata register support. */
+ if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
+ return mlx5_flow_validate_action_flag(action_flags, attr,
+ error);
+ /* Extensive metadata mode requires registers. */
+ if (!mlx5_flow_ext_mreg_supported(dev))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "no metadata registers "
+ "to support flag action");
+ if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "extended metadata register"
+ " isn't available");
+ ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+ if (ret < 0)
+ return ret;
+ assert(ret > 0);
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't mark and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 flag"
+ " actions in same flow");
return 0;
-#endif
-notsup_err:
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "count action not supported");
}
 /**
- * Validate the L2 encap action.
+ * Validate MARK action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] action
+ *   Pointer to action.
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
+ * @param[in] attr
+ *   Pointer to flow attributes
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_mark(struct rte_eth_dev *dev,
+			     const struct rte_flow_action *action,
+			     uint64_t action_flags,
+			     const struct rte_flow_attr *attr,
+			     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
+	const struct rte_flow_action_mark *mark = action->conf;
+	int ret;
+
+	/* Fall back if no extended metadata register support. */
+	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
+		return mlx5_flow_validate_action_mark(action, action_flags,
+						      attr, error);
+	/* Extensive metadata mode requires registers. */
+	if (!mlx5_flow_ext_mreg_supported(dev))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "no metadata registers "
+					  "to support mark action");
+	if (!priv->sh->dv_mark_mask)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "extended metadata register"
+					  " isn't available");
+	/* Check that a register is actually allocated for MARK. */
+	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+	if (ret < 0)
+		return ret;
+	assert(ret > 0);
+	if (!mark)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "configuration cannot be null");
+	/* The id must fit into the mask reported by the driver. */
+	if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &mark->id,
+					  "mark id exceeds the limit");
+	if (action_flags & MLX5_FLOW_ACTION_DROP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't drop and mark in same flow");
+	if (action_flags & MLX5_FLOW_ACTION_FLAG)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't flag and mark in same flow");
+	if (action_flags & MLX5_FLOW_ACTION_MARK)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't have 2 mark actions in same"
+					  " flow");
+	return 0;
+}
+
+/**
+ * Validate SET_META action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] action
+ *   Pointer to the action structure.
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
+ * @param[in] attr
+ *   Pointer to flow attributes
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
+				 const struct rte_flow_action *action,
+				 uint64_t action_flags __rte_unused,
+				 const struct rte_flow_attr *attr,
+				 struct rte_flow_error *error)
+{
+	const struct rte_flow_action_set_meta *conf;
+	uint32_t nic_mask = UINT32_MAX;
+	/*
+	 * Use a plain int: flow_dv_get_metadata_reg() may report a negative
+	 * errno, and an enum's signedness is implementation-defined, which
+	 * could defeat the "reg < 0" error check below.
+	 */
+	int reg;
+
+	if (!mlx5_flow_ext_mreg_supported(dev))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "extended metadata register"
+					  " isn't supported");
+	reg = flow_dv_get_metadata_reg(dev, attr, error);
+	if (reg < 0)
+		return reg;
+	if (reg != REG_A && reg != REG_B) {
+		struct mlx5_priv *priv = dev->data->dev_private;
+
+		/* reg_c registers may only be partially writable. */
+		nic_mask = priv->sh->dv_meta_mask;
+	}
+	if (!(action->conf))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "configuration cannot be null");
+	conf = (const struct rte_flow_action_set_meta *)action->conf;
+	if (!conf->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "zero mask doesn't have any effect");
+	if (conf->mask & ~nic_mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "meta data must be within reg C0");
+	if (!(conf->data & conf->mask))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "zero value has no effect");
+	return 0;
+}
+
+/**
+ * Validate SET_TAG action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] action
+ *   Pointer to the action structure.
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
+ * @param[in] attr
+ *   Pointer to flow attributes
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
+				const struct rte_flow_action *action,
+				uint64_t action_flags,
+				const struct rte_flow_attr *attr,
+				struct rte_flow_error *error)
+{
+	const struct rte_flow_action_set_tag *conf;
+	const uint64_t terminal_action_flags =
+		MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
+		MLX5_FLOW_ACTION_RSS;
+	int ret;
+
+	if (!mlx5_flow_ext_mreg_supported(dev))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "extensive metadata register"
+					  " isn't supported");
+	if (!(action->conf))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "configuration cannot be null");
+	conf = (const struct rte_flow_action_set_tag *)action->conf;
+	if (!conf->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "zero mask doesn't have any effect");
+	/* Check that a register is available for the given tag index. */
+	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
+	if (ret < 0)
+		return ret;
+	/* A tag set alongside a terminal fate action has no effect. */
+	if (!attr->transfer && attr->ingress &&
+	    (action_flags & terminal_action_flags))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "set_tag has no effect"
+					  " with terminal actions");
+	return 0;
+}
+
+/**
+ * Validate count action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_count(struct rte_eth_dev *dev,
+			      struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	/* Flow counters need DevX; without it the action is unsupported. */
+	if (!priv->config.devx)
+		goto notsup_err;
+#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
+	return 0;
+#endif
+notsup_err:
+	return rte_flow_error_set
+		      (error, ENOTSUP,
+		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+		       NULL,
+		       "count action not supported");
+}
+
+/**
+ * Validate the L2 encap action.
*
* @param[in] action_flags
* Holds the actions detected until now.
/**
* Find existing encap/decap resource or create and register a new one.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] resource
* Pointer to encap/decap resource.
/**
* Find existing table jump resource or create and register a new one.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to jump table resource.
+ * @param[in, out] tbl
+ * Pointer to flow table resource.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
*/
static int
flow_dv_jump_tbl_resource_register
- (struct rte_eth_dev *dev,
- struct mlx5_flow_dv_jump_tbl_resource *resource,
+ (struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_flow_tbl_resource *tbl,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
+ int cnt;
- /* Lookup a matching resource from cache. */
- LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
- if (resource->tbl == cache_resource->tbl) {
- DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.jump = cache_resource;
- return 0;
- }
- }
- /* Register new jump table resource. */
- cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
- if (!cache_resource)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- *cache_resource = *resource;
- cache_resource->action =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (resource->tbl->obj);
- if (!cache_resource->action) {
- rte_free(cache_resource);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create action");
+ assert(tbl);
+ cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
+ if (!cnt) {
+ tbl_data->jump.action =
+ mlx5_glue->dr_create_flow_action_dest_flow_tbl
+ (tbl->obj);
+ if (!tbl_data->jump.action)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create jump action");
+ DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
+ (void *)&tbl_data->jump, cnt);
+ } else {
+ assert(tbl_data->jump.action);
+ DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
+ (void *)&tbl_data->jump, cnt);
}
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
- dev_flow->dv.jump = cache_resource;
- DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&tbl_data->jump.refcnt);
+ dev_flow->dv.jump = &tbl_data->jump;
return 0;
}
/**
* Find existing table port ID resource or create and register a new one.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] resource
* Pointer to port ID action resource.
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
+ /*
+ * Depending on rdma_core version the glue routine calls
+ * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
+ * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
+ */
cache_resource->action =
- mlx5_glue->dr_create_flow_action_dest_vport
+ mlx5_glue->dr_create_flow_action_dest_port
(priv->sh->fdb_domain, resource->port_id);
if (!cache_resource->action) {
rte_free(cache_resource);
/**
* Find existing push vlan resource or create and register a new one.
*
- * @param dev[in, out]
+ * @param [in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] resource
* Pointer to port ID action resource.
const struct rte_flow_attr *attributes,
bool external, struct rte_flow_error *error)
{
- uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
- MLX5_MAX_TABLES;
uint32_t target_group, table;
int ret = 0;
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't have 2 fate actions in"
" same flow");
+ if (action_flags & MLX5_FLOW_ACTION_METER)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "jump with meter not support");
if (!action->conf)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&table, error);
if (ret)
return ret;
- if (table >= max_group)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
- "target group index out of range");
- if (attributes->group >= target_group)
+ if (attributes->group == target_group)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "target group must be higher than"
+ "target group must be other than"
" the current flow group");
return 0;
}
"failed to obtain E-Switch info");
port_id = action->conf;
port = port_id->original ? dev->data->port_id : port_id->id;
- act_priv = mlx5_port_to_eswitch_info(port);
+ act_priv = mlx5_port_to_eswitch_info(port, false);
if (!act_priv)
return rte_flow_error_set
(error, rte_errno,
return 0;
}
+/**
+ * Get the maximum number of modify header actions.
+ *
+ * @param dev
+ *   Pointer to rte_eth_dev structure.
+ *
+ * @return
+ *   Max number of modify header actions device can support.
+ */
+static unsigned int
+flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev)
+{
+	/*
+	 * There is no way to directly query the max cap; it would have to
+	 * be acquired by iterative trial. It is, however, a safe assumption
+	 * that FW supports more actions whenever the extensive metadata
+	 * registers are supported.
+	 */
+	if (mlx5_flow_ext_mreg_supported(dev))
+		return MLX5_MODIFY_NUM;
+	return MLX5_MODIFY_NUM_NO_MREG;
+}
+
+/**
+ * Validate the meter action.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] action_flags
+ *   Bit-fields that holds the actions detected until now.
+ * @param[in] action
+ *   Pointer to the meter action.
+ * @param[in] attr
+ *   Attributes of flow that includes this action.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
+				uint64_t action_flags,
+				const struct rte_flow_action *action,
+				const struct rte_flow_attr *attr,
+				struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_action_meter *am = action->conf;
+	struct mlx5_flow_meter *fm;
+
+	/* Do not dereference am->mtr_id before the conf is validated. */
+	if (!am)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "meter action conf is NULL");
+	if (action_flags & MLX5_FLOW_ACTION_METER)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "meter chaining not support");
+	if (action_flags & MLX5_FLOW_ACTION_JUMP)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "meter with jump not support");
+	if (!priv->mtr_en)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "meter action not supported");
+	fm = mlx5_flow_meter_find(priv, am->mtr_id);
+	if (!fm)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "Meter not found");
+	/* A shared meter must be compatible with the flow direction. */
+	if (fm->ref_cnt && (!(fm->attr.transfer == attr->transfer ||
+	      (!fm->attr.ingress && !attr->ingress && attr->egress) ||
+	      (!fm->attr.egress && !attr->egress && attr->ingress))))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "Flow attributes are either invalid "
+					  "or have a conflict with current "
+					  "meter attributes");
+	return 0;
+}
+
/**
* Find existing modify-header resource or create and register a new one.
*
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
struct mlx5dv_dr_domain *ns;
+ if (resource->actions_num > flow_dv_modify_hdr_action_max(dev))
+ return rte_flow_error_set(error, EOVERFLOW,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "too many modify header items");
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
ns = sh->fdb_domain;
else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
NULL,
"groups are not supported");
#else
- uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
- MLX5_MAX_TABLES;
uint32_t table;
int ret;
&table, error);
if (ret)
return ret;
- if (table >= max_group)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
- "group index out of range");
#endif
if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
attributes->priority >= priority_max)
.dst_port = RTE_BE16(UINT16_MAX),
}
};
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *dev_conf = &priv->config;
if (items == NULL)
return -1;
return ret;
last_item = MLX5_FLOW_LAYER_MPLS;
break;
+
+ case RTE_FLOW_ITEM_TYPE_MARK:
+ ret = flow_dv_validate_item_mark(dev, items, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_MARK;
+ break;
case RTE_FLOW_ITEM_TYPE_META:
ret = flow_dv_validate_item_meta(dev, items, attr,
error);
return ret;
last_item = MLX5_FLOW_LAYER_ICMP6;
break;
+ case RTE_FLOW_ITEM_TYPE_TAG:
+ ret = flow_dv_validate_item_tag(dev, items,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_TAG;
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
break;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
- ret = mlx5_flow_validate_action_flag(action_flags,
- attr, error);
+ ret = flow_dv_validate_action_flag(dev, action_flags,
+ attr, error);
if (ret < 0)
return ret;
- action_flags |= MLX5_FLOW_ACTION_FLAG;
- ++actions_n;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ /* Count all modify-header actions as one. */
+ if (!(action_flags &
+ MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_FLAG |
+ MLX5_FLOW_ACTION_MARK_EXT;
+ } else {
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ ++actions_n;
+ }
break;
case RTE_FLOW_ACTION_TYPE_MARK:
- ret = mlx5_flow_validate_action_mark(actions,
- action_flags,
- attr, error);
+ ret = flow_dv_validate_action_mark(dev, actions,
+ action_flags,
+ attr, error);
if (ret < 0)
return ret;
- action_flags |= MLX5_FLOW_ACTION_MARK;
- ++actions_n;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ /* Count all modify-header actions as one. */
+ if (!(action_flags &
+ MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_MARK_EXT;
+ } else {
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ ++actions_n;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_META:
+ ret = flow_dv_validate_action_set_meta(dev, actions,
+ action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_META;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ ret = flow_dv_validate_action_set_tag(dev, actions,
+ action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
case RTE_FLOW_ACTION_TYPE_DROP:
ret = mlx5_flow_validate_action_drop(action_flags,
MLX5_FLOW_ACTION_DEC_TCP_ACK;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
+ case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
break;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ ret = mlx5_flow_validate_action_meter(dev,
+ action_flags,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_METER;
+ ++actions_n;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
" actions in the same rule");
/* Eswitch has few restrictions on using items and actions */
if (attr->transfer) {
- if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ if (!mlx5_flow_ext_mreg_supported(dev) &&
+ action_flags & MLX5_FLOW_ACTION_FLAG)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"unsupported action FLAG");
- if (action_flags & MLX5_FLOW_ACTION_MARK)
+ if (!mlx5_flow_ext_mreg_supported(dev) &&
+ action_flags & MLX5_FLOW_ACTION_MARK)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
}
}
+/**
+ * Add MARK item to matcher
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_mark(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_mark *mark;
+ uint32_t value;
+ uint32_t mask;
+
+ mark = item->mask ? (const void *)item->mask :
+ &rte_flow_item_mark_mask;
+ mask = mark->id & priv->sh->dv_mark_mask;
+ mark = (const void *)item->spec;
+ assert(mark);
+ value = mark->id & priv->sh->dv_mark_mask & mask;
+ if (mask) {
+ enum modify_reg reg;
+
+ /* Get the metadata register index for the mark. */
+ reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
+ assert(reg > 0);
+ flow_dv_match_meta_reg(matcher, key, reg, value, mask);
+ }
+}
+
/**
* Add META item to matcher
*
+ * @param[in] dev
+ * The device to configure through.
* @param[in, out] matcher
* Flow matcher.
* @param[in, out] key
* Flow matcher value.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
* @param[in] item
* Flow pattern to translate.
- * @param[in] inner
- * Item is inner pattern.
*/
static void
-flow_dv_translate_item_meta(void *matcher, void *key,
+flow_dv_translate_item_meta(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *item)
{
const struct rte_flow_item_meta *meta_m;
if (!meta_m)
meta_m = &rte_flow_item_meta_mask;
meta_v = (const void *)item->spec;
- if (meta_v)
- flow_dv_match_meta_reg(matcher, key, REG_A,
- rte_cpu_to_be_32(meta_v->data),
- rte_cpu_to_be_32(meta_m->data));
+ if (meta_v) {
+ enum modify_reg reg;
+ uint32_t value = meta_v->data;
+ uint32_t mask = meta_m->data;
+
+ reg = flow_dv_get_metadata_reg(dev, attr, NULL);
+ if (reg < 0)
+ return;
+ /*
+ * In datapath code there are no endianness
+ * conversions for performance reasons; all
+ * pattern conversions are done in rte_flow.
+ */
+ value = rte_cpu_to_be_32(value);
+ mask = rte_cpu_to_be_32(mask);
+ if (reg == REG_C_0) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t msk_c0 = priv->sh->dv_regc0_mask;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
+
+ msk_c0 = rte_cpu_to_be_32(msk_c0);
+ value <<= shl_c0;
+ mask <<= shl_c0;
+ assert(msk_c0);
+ assert(!(~msk_c0 & mask));
+ }
+ flow_dv_match_meta_reg(matcher, key, reg, value, mask);
+ }
}
/**
{
const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
- enum modify_reg reg = tag_v->id;
+ assert(tag_v);
+ flow_dv_match_meta_reg(matcher, key, tag_v->id, tag_v->data,
+ tag_m ? tag_m->data : UINT32_MAX);
+}
+
+/**
+ * Add TAG item to matcher
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_tag(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_tag *tag_v = item->spec;
+ const struct rte_flow_item_tag *tag_m = item->mask;
+ enum modify_reg reg;
+
+ assert(tag_v);
+ tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
+ /* Get the metadata register index for the tag. */
+ reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
+ assert(reg > 0);
flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
}
mask = pid_m ? pid_m->id : 0xffff;
id = pid_v ? pid_v->id : dev->data->port_id;
- priv = mlx5_port_to_eswitch_info(id);
+ priv = mlx5_port_to_eswitch_info(id, item == NULL);
if (!priv)
return -rte_errno;
/* Translate to vport field or to metadata, depending on mode. */
/**
* Get a flow table.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in] table_id
* Table id to use.
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_tbl_resource *tbl;
-
-#ifdef HAVE_MLX5DV_DR
- if (transfer) {
- tbl = &sh->fdb_tbl[table_id];
- if (!tbl->obj)
- tbl->obj = mlx5_glue->dr_create_flow_tbl
- (sh->fdb_domain, table_id);
- } else if (egress) {
- tbl = &sh->tx_tbl[table_id];
- if (!tbl->obj)
- tbl->obj = mlx5_glue->dr_create_flow_tbl
- (sh->tx_domain, table_id);
- } else {
- tbl = &sh->rx_tbl[table_id];
- if (!tbl->obj)
- tbl->obj = mlx5_glue->dr_create_flow_tbl
- (sh->rx_domain, table_id);
+ union mlx5_flow_tbl_key table_key = {
+ {
+ .table_id = table_id,
+ .reserved = 0,
+ .domain = !!transfer,
+ .direction = !!egress,
+ }
+ };
+ struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
+ table_key.v64);
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ int ret;
+ void *domain;
+
+ if (pos) {
+ tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+ entry);
+ tbl = &tbl_data->tbl;
+ rte_atomic32_inc(&tbl->refcnt);
+ return tbl;
}
- if (!tbl->obj) {
+ tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+ if (!tbl_data) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create table");
+ NULL,
+ "cannot allocate flow table data entry");
return NULL;
}
- rte_atomic32_inc(&tbl->refcnt);
- return tbl;
-#else
- (void)error;
- (void)tbl;
+ tbl = &tbl_data->tbl;
+ pos = &tbl_data->entry;
if (transfer)
- return &sh->fdb_tbl[table_id];
+ domain = sh->fdb_domain;
else if (egress)
- return &sh->tx_tbl[table_id];
+ domain = sh->tx_domain;
else
- return &sh->rx_tbl[table_id];
-#endif
+ domain = sh->rx_domain;
+ tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
+ if (!tbl->obj) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create flow table object");
+ rte_free(tbl_data);
+ return NULL;
+ }
+ /*
+ * No multi-threading for now, but it is still better to initialize the
+ * reference count before inserting it into the hash list.
+ */
+ rte_atomic32_init(&tbl->refcnt);
+ /* Jump action reference count is initialized here. */
+ rte_atomic32_init(&tbl_data->jump.refcnt);
+ pos->key = table_key.v64;
+ ret = mlx5_hlist_insert(sh->flow_tbls, pos);
+ if (ret < 0) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot insert flow table data entry");
+ mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ rte_free(tbl_data);
+ }
+ rte_atomic32_inc(&tbl->refcnt);
+ return tbl;
}
/**
* Release a flow table.
*
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
* @param[in] tbl
* Table resource to be released.
*
* Returns 0 if table was released, else return 1;
*/
static int
-flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
+flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_tbl_resource *tbl)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
+
if (!tbl)
return 0;
if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+ struct mlx5_hlist_entry *pos = &tbl_data->entry;
+
mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
tbl->obj = NULL;
+ /* remove the entry from the hash list and free memory. */
+ mlx5_hlist_remove(sh->flow_tbls, pos);
+ rte_free(tbl_data);
return 0;
}
return 1;
/**
* Register the flow matcher.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] matcher
* Pointer to flow matcher.
+ * @param[in, out] key
+ * Pointer to flow table key.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
struct mlx5_flow_dv_matcher *matcher,
+ union mlx5_flow_tbl_key *key,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
.type = IBV_FLOW_ATTR_NORMAL,
.match_mask = (void *)&matcher->mask,
};
- struct mlx5_flow_tbl_resource *tbl = NULL;
+ struct mlx5_flow_tbl_resource *tbl;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
+ key->domain, error);
+ if (!tbl)
+ return -rte_errno; /* No need to refill the error info */
+ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
/* Lookup from cache. */
- LIST_FOREACH(cache_matcher, &sh->matchers, next) {
+ LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
if (matcher->crc == cache_matcher->crc &&
matcher->priority == cache_matcher->priority &&
- matcher->egress == cache_matcher->egress &&
- matcher->group == cache_matcher->group &&
- matcher->transfer == cache_matcher->transfer &&
!memcmp((const void *)matcher->mask.buf,
(const void *)cache_matcher->mask.buf,
cache_matcher->mask.size)) {
DRV_LOG(DEBUG,
- "priority %hd use %s matcher %p: refcnt %d++",
+ "%s group %u priority %hd use %s "
+ "matcher %p: refcnt %d++",
+ key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
- cache_matcher->egress ? "tx" : "rx",
+ key->direction ? "tx" : "rx",
(void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
rte_atomic32_inc(&cache_matcher->refcnt);
dev_flow->dv.matcher = cache_matcher;
+ /* old matcher should not make the table ref++. */
+ flow_dv_tbl_resource_release(dev, tbl);
return 0;
}
}
/* Register new matcher. */
cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
- if (!cache_matcher)
+ if (!cache_matcher) {
+ flow_dv_tbl_resource_release(dev, tbl);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate matcher memory");
- tbl = flow_dv_tbl_resource_get(dev, matcher->group,
- matcher->egress, matcher->transfer,
- error);
- if (!tbl) {
- rte_free(cache_matcher);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create table");
}
*cache_matcher = *matcher;
dv_attr.match_criteria_enable =
flow_dv_matcher_enable(cache_matcher->mask.buf);
dv_attr.priority = matcher->priority;
- if (matcher->egress)
+ if (key->direction)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
cache_matcher->matcher_object =
mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
if (!cache_matcher->matcher_object) {
rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
- flow_dv_tbl_resource_release(tbl);
+ flow_dv_tbl_resource_release(dev, tbl);
#endif
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create matcher");
}
+ /* Save the table information */
+ cache_matcher->tbl = tbl;
+ rte_atomic32_init(&cache_matcher->refcnt);
+ /* only matcher ref++, table ref++ already done above in get API. */
rte_atomic32_inc(&cache_matcher->refcnt);
- LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
+ LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
dev_flow->dv.matcher = cache_matcher;
- DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
+ DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
+ key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
- cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
+ key->direction ? "tx" : "rx", (void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
- rte_atomic32_inc(&tbl->refcnt);
return 0;
}
*
* @param dev[in, out]
* Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to tag resource.
+ * @param[in, out] tag_be24
+ * Tag value in big-endian, then right-shifted by 8 bits.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
static int
flow_dv_tag_resource_register
(struct rte_eth_dev *dev,
- struct mlx5_flow_dv_tag_resource *resource,
+ uint32_t tag_be24,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *cache_resource;
+ struct mlx5_hlist_entry *entry;
/* Lookup a matching resource from cache. */
- LIST_FOREACH(cache_resource, &sh->tags, next) {
- if (resource->tag == cache_resource->tag) {
- DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.tag_resource = cache_resource;
- return 0;
- }
+ entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
+ if (entry) {
+ cache_resource = container_of
+ (entry, struct mlx5_flow_dv_tag_resource, entry);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->dv.tag_resource = cache_resource;
+ DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
}
- /* Register new resource. */
+ /* Register new resource. */
cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
- *cache_resource = *resource;
- cache_resource->action = mlx5_glue->dv_create_flow_action_tag
- (resource->tag);
+ cache_resource->entry.key = (uint64_t)tag_be24;
+ cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
if (!cache_resource->action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
+ if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
+ mlx5_glue->destroy_flow_action(cache_resource->action);
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot insert tag");
+ }
dev_flow->dv.tag_resource = cache_resource;
- DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
+ DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
return 0;
flow_dv_tag_release(struct rte_eth_dev *dev,
struct mlx5_flow_dv_tag_resource *tag)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+
assert(tag);
DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
dev->data->port_id, (void *)tag,
rte_atomic32_read(&tag->refcnt));
if (rte_atomic32_dec_and_test(&tag->refcnt)) {
claim_zero(mlx5_glue->destroy_flow_action(tag->action));
- LIST_REMOVE(tag, next);
+ mlx5_hlist_remove(sh->tag_table, &tag->entry);
DRV_LOG(DEBUG, "port %u tag %p: removed",
dev->data->port_id, (void *)tag);
rte_free(tag);
(const struct rte_flow_action_port_id *)action->conf;
port = conf->original ? dev->data->port_id : conf->id;
- priv = mlx5_port_to_eswitch_info(port);
+ priv = mlx5_port_to_eswitch_info(port, false);
if (!priv)
return rte_flow_error_set(error, -rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"No eswitch info was found for port");
- if (priv->vport_meta_mask)
- *dst_port_id = priv->vport_meta_tag;
- else
- *dst_port_id = priv->vport_id;
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+ /*
+ * This parameter is transferred to
+ * mlx5dv_dr_action_create_dest_ib_port().
+ */
+ *dst_port_id = priv->ibv_port;
+#else
+ /*
+ * Legacy mode, no LAG configuration is supported.
+ * This parameter is transferred to
+ * mlx5dv_dr_action_create_dest_vport().
+ */
+ *dst_port_id = priv->vport_id;
+#endif
return 0;
}
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *dev_conf = &priv->config;
struct rte_flow *flow = dev_flow->flow;
uint64_t item_flags = 0;
uint64_t last_item = 0;
MLX5DV_FLOW_TABLE_TYPE_NIC_RX
};
union flow_dv_attr flow_attr = { .attr = 0 };
- struct mlx5_flow_dv_tag_resource tag_resource;
+ uint32_t tag_be;
+ union mlx5_flow_tbl_key tbl_key;
uint32_t modify_action_position = UINT32_MAX;
void *match_mask = matcher.mask.buf;
void *match_value = dev_flow->dv.value.buf;
if (attr->transfer)
mhdr_res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
if (priority == MLX5_FLOW_PRIO_RSVD)
- priority = priv->config.flow_prio - 1;
+ priority = dev_conf->flow_prio - 1;
for (; !actions_end ; actions++) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
const struct rte_flow_action_count *count = action->conf;
const uint8_t *rss_key;
const struct rte_flow_action_jump *jump_data;
- struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
+ const struct rte_flow_action_meter *mtr;
struct mlx5_flow_tbl_resource *tbl;
uint32_t port_id = 0;
struct mlx5_flow_dv_port_id_action_resource port_id_resource;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
- tag_resource.tag =
- mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ struct rte_flow_action_mark mark = {
+ .id = MLX5_FLOW_MARK_DEFAULT,
+ };
+
+ if (flow_dv_convert_action_mark(dev, &mark,
+ &mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
- (dev, &tag_resource, dev_flow, error))
- return errno;
+ (dev, tag_be, dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.tag_resource->action;
- action_flags |= MLX5_FLOW_ACTION_FLAG;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
- tag_resource.tag = mlx5_flow_mark_set
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
+
+ if (flow_dv_convert_action_mark(dev, mark,
+ &mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ /* Fall-through */
+ case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
+ /* Legacy (non-extensive) MARK action. */
+ tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
- (dev, &tag_resource, dev_flow, error))
- return errno;
+ (dev, tag_be, dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.tag_resource->action;
- action_flags |= MLX5_FLOW_ACTION_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_META:
+ if (flow_dv_convert_action_set_meta
+ (dev, &mhdr_res, attr,
+ (const struct rte_flow_action_set_meta *)
+ actions->conf, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_META;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ if (flow_dv_convert_action_set_tag
+ (dev, &mhdr_res,
+ (const struct rte_flow_action_set_tag *)
+ actions->conf, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
case RTE_FLOW_ACTION_TYPE_DROP:
action_flags |= MLX5_FLOW_ACTION_DROP;
action_flags |= MLX5_FLOW_ACTION_RSS;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- if (!priv->config.devx) {
+ if (!dev_conf->devx) {
rte_errno = ENOTSUP;
goto cnt_err;
}
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"cannot create jump action.");
- jump_tbl_resource.tbl = tbl;
if (flow_dv_jump_tbl_resource_register
- (dev, &jump_tbl_resource, dev_flow, error)) {
- flow_dv_tbl_resource_release(tbl);
+ (dev, tbl, dev_flow, error)) {
+ flow_dv_tbl_resource_release(dev, tbl);
return rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_ACTION,
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ mtr = actions->conf;
+ if (!flow->meter) {
+ flow->meter = mlx5_flow_meter_attach(priv,
+ mtr->mtr_id, attr,
+ error);
+ if (!flow->meter)
+ return rte_flow_error_set(error,
+ rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "meter not found "
+ "or invalid parameters");
+ }
+ /* Set the meter action. */
+ dev_flow->dv.actions[actions_n++] =
+ flow->meter->mfts->meter_action;
+ action_flags |= MLX5_FLOW_ACTION_METER;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
- if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
+ if (mhdr_res.actions_num) {
/* create modify action if needed. */
if (flow_dv_modify_hdr_resource_register
(dev, &mhdr_res, dev_flow, error))
default:
break;
}
- if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
+ if (mhdr_res.actions_num &&
modify_action_position == UINT32_MAX)
modify_action_position = actions_n++;
}
items, last_item, tunnel);
last_item = MLX5_FLOW_LAYER_MPLS;
break;
+ case RTE_FLOW_ITEM_TYPE_MARK:
+ flow_dv_translate_item_mark(dev, match_mask,
+ match_value, items);
+ last_item = MLX5_FLOW_ITEM_MARK;
+ break;
case RTE_FLOW_ITEM_TYPE_META:
- flow_dv_translate_item_meta(match_mask, match_value,
- items);
+ flow_dv_translate_item_meta(dev, match_mask,
+ match_value, attr, items);
last_item = MLX5_FLOW_ITEM_METADATA;
break;
case RTE_FLOW_ITEM_TYPE_ICMP:
items, tunnel);
last_item = MLX5_FLOW_LAYER_ICMP6;
break;
+ case RTE_FLOW_ITEM_TYPE_TAG:
+ flow_dv_translate_item_tag(dev, match_mask,
+ match_value, items);
+ last_item = MLX5_FLOW_ITEM_TAG;
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
flow_dv_translate_mlx5_item_tag(match_mask,
match_value, items);
matcher.mask.size);
matcher.priority = mlx5_flow_adjust_priority(dev, priority,
matcher.priority);
- matcher.egress = attr->egress;
- matcher.group = dev_flow->group;
- matcher.transfer = attr->transfer;
- if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
+ /* reserved field no needs to be set to 0 here. */
+ tbl_key.domain = attr->transfer;
+ tbl_key.direction = attr->egress;
+ tbl_key.table_id = dev_flow->group;
+ if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
return -rte_errno;
return 0;
}
struct mlx5_flow *flow)
{
struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_tbl_resource *tbl;
assert(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(matcher->matcher_object));
LIST_REMOVE(matcher, next);
- if (matcher->egress)
- tbl = &sh->tx_tbl[matcher->group];
- else
- tbl = &sh->rx_tbl[matcher->group];
- flow_dv_tbl_resource_release(tbl);
+ /* table ref-- in release interface. */
+ flow_dv_tbl_resource_release(dev, matcher->tbl);
rte_free(matcher);
DRV_LOG(DEBUG, "port %u matcher %p: removed",
dev->data->port_id, (void *)matcher);
/**
* Release an jump to table action resource.
*
+ * @param dev
+ * Pointer to Ethernet device.
* @param flow
* Pointer to mlx5_flow.
*
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
+flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow *flow)
{
- struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
- flow->dv.jump;
+ struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(cache_resource,
+ struct mlx5_flow_tbl_data_entry, jump);
assert(cache_resource->action);
DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
claim_zero(mlx5_glue->destroy_flow_action
(cache_resource->action));
- LIST_REMOVE(cache_resource, next);
- flow_dv_tbl_resource_release(cache_resource->tbl);
- rte_free(cache_resource);
+ /* jump action memory free is inside the table release. */
+ flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
DRV_LOG(DEBUG, "jump table resource %p: removed",
(void *)cache_resource);
return 0;
flow_dv_counter_release(dev, flow->counter);
flow->counter = NULL;
}
+ if (flow->meter) {
+ mlx5_flow_meter_detach(flow->meter);
+ flow->meter = NULL;
+ }
while (!LIST_EMPTY(&flow->dev_flows)) {
dev_flow = LIST_FIRST(&flow->dev_flows);
LIST_REMOVE(dev_flow, next);
if (dev_flow->dv.modify_hdr)
flow_dv_modify_hdr_resource_release(dev_flow);
if (dev_flow->dv.jump)
- flow_dv_jump_tbl_resource_release(dev_flow);
+ flow_dv_jump_tbl_resource_release(dev, dev_flow);
if (dev_flow->dv.port_id_action)
flow_dv_port_id_action_resource_release(dev_flow);
if (dev_flow->dv.push_vlan_res)
return ret;
}
+/**
+ * Destroy the meter table set.
+ * Lock free, (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] tbl
+ *   Pointer to the meter table set.
+ *
+ * @return
+ *   Always 0.
+ */
+static int
+flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
+			struct mlx5_meter_domains_infos *tbl)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	/* No cast needed - the parameter already has the right type. */
+	struct mlx5_meter_domains_infos *mtd = tbl;
+
+	if (!mtd || !priv->config.dv_flow_en)
+		return 0;
+	/*
+	 * Release in dependency order: rules first, then matchers, then
+	 * tables, and the shared drop action last.
+	 */
+	if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
+		claim_zero(mlx5_glue->dv_destroy_flow
+			   (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
+	if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
+		claim_zero(mlx5_glue->dv_destroy_flow
+			   (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
+	if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
+		claim_zero(mlx5_glue->dv_destroy_flow
+			   (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
+	if (mtd->egress.color_matcher)
+		claim_zero(mlx5_glue->dv_destroy_flow_matcher
+			   (mtd->egress.color_matcher));
+	if (mtd->egress.any_matcher)
+		claim_zero(mlx5_glue->dv_destroy_flow_matcher
+			   (mtd->egress.any_matcher));
+	if (mtd->egress.tbl)
+		claim_zero(flow_dv_tbl_resource_release(dev,
+							mtd->egress.tbl));
+	if (mtd->ingress.color_matcher)
+		claim_zero(mlx5_glue->dv_destroy_flow_matcher
+			   (mtd->ingress.color_matcher));
+	if (mtd->ingress.any_matcher)
+		claim_zero(mlx5_glue->dv_destroy_flow_matcher
+			   (mtd->ingress.any_matcher));
+	if (mtd->ingress.tbl)
+		claim_zero(flow_dv_tbl_resource_release(dev,
+							mtd->ingress.tbl));
+	if (mtd->transfer.color_matcher)
+		claim_zero(mlx5_glue->dv_destroy_flow_matcher
+			   (mtd->transfer.color_matcher));
+	if (mtd->transfer.any_matcher)
+		claim_zero(mlx5_glue->dv_destroy_flow_matcher
+			   (mtd->transfer.any_matcher));
+	if (mtd->transfer.tbl)
+		claim_zero(flow_dv_tbl_resource_release(dev,
+							mtd->transfer.tbl));
+	/* NOTE: the counter actions are borrowed, not owned - not freed. */
+	if (mtd->drop_actn)
+		claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
+	rte_free(mtd);
+	return 0;
+}
+
+/* Number of meter flow actions, count and jump or count and drop. */
+#define METER_ACTIONS 2
+
+/**
+ * Create specify domain meter table and suffix table.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in,out] mtb
+ *   Pointer to DV meter table set.
+ * @param[in] egress
+ *   Table attribute.
+ * @param[in] transfer
+ *   Table attribute.
+ * @param[in] color_reg_c_idx
+ *   Reg C index for color match.
+ *
+ * @return
+ *   0 on success, -1 otherwise.  NOTE(review): rte_errno is not set on
+ *   the matcher/rule failure paths here - confirm callers do not rely
+ *   on it.  Partially created objects are released by the caller via
+ *   flow_dv_destroy_mtr_tbl().
+ */
+static int
+flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
+			   struct mlx5_meter_domains_infos *mtb,
+			   uint8_t egress, uint8_t transfer,
+			   uint32_t color_reg_c_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_flow_dv_match_params mask = {
+		.size = sizeof(mask.buf),
+	};
+	struct mlx5_flow_dv_match_params value = {
+		.size = sizeof(value.buf),
+	};
+	struct mlx5dv_flow_matcher_attr dv_attr = {
+		.type = IBV_FLOW_ATTR_NORMAL,
+		.priority = 0,
+		.match_criteria_enable = 0,
+		.match_mask = (void *)&mask,
+	};
+	void *actions[METER_ACTIONS];
+	struct mlx5_flow_tbl_resource **sfx_tbl;
+	struct mlx5_meter_domain_info *dtb;
+	struct rte_flow_error error;
+	int i = 0;
+
+	/* Select the per-domain suffix table slot and meter table info. */
+	if (transfer) {
+		sfx_tbl = &sh->fdb_mtr_sfx_tbl;
+		dtb = &mtb->transfer;
+	} else if (egress) {
+		sfx_tbl = &sh->tx_mtr_sfx_tbl;
+		dtb = &mtb->egress;
+	} else {
+		sfx_tbl = &sh->rx_mtr_sfx_tbl;
+		dtb = &mtb->ingress;
+	}
+	/*
+	 * If the suffix table is missing, create it.  The suffix table is
+	 * shared per-domain across meters (cached in the shared context).
+	 */
+	if (!(*sfx_tbl)) {
+		*sfx_tbl = flow_dv_tbl_resource_get(dev,
+						MLX5_FLOW_TABLE_LEVEL_SUFFIX,
+						egress, transfer, &error);
+		if (!(*sfx_tbl)) {
+			DRV_LOG(ERR, "Failed to create meter suffix table.");
+			return -1;
+		}
+	}
+	/* Create the meter table with METER level. */
+	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
+					    egress, transfer, &error);
+	if (!dtb->tbl) {
+		DRV_LOG(ERR, "Failed to create meter policer table.");
+		return -1;
+	}
+	/* Create matchers, Any and Color. */
+	/* Lowest priority (3): catch-all matcher for the default rule. */
+	dv_attr.priority = 3;
+	dv_attr.match_criteria_enable = 0;
+	dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
+							     &dv_attr,
+							     dtb->tbl->obj);
+	if (!dtb->any_matcher) {
+		DRV_LOG(ERR, "Failed to create meter"
+			     " policer default matcher.");
+		goto error_exit;
+	}
+	/* Highest priority (0): matcher on the color metadata register. */
+	dv_attr.priority = 0;
+	dv_attr.match_criteria_enable =
+				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
+	/*
+	 * RTE_COLORS is out of the color range, so rte_col_2_mlx5_col()
+	 * yields MLX5_FLOW_COLOR_UNDEFINED here - only the full-bit mask
+	 * matters for the matcher template.
+	 */
+	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
+			       rte_col_2_mlx5_col(RTE_COLORS), UINT32_MAX);
+	dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
+							       &dv_attr,
+							       dtb->tbl->obj);
+	if (!dtb->color_matcher) {
+		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
+		goto error_exit;
+	}
+	/* Count action is optional; drop action is always present. */
+	if (mtb->count_actns[RTE_MTR_DROPPED])
+		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
+	actions[i++] = mtb->drop_actn;
+	/* Default rule: lowest priority, match any, actions: drop. */
+	dtb->policer_rules[RTE_MTR_DROPPED] =
+			mlx5_glue->dv_create_flow(dtb->any_matcher,
+						  (void *)&value, i, actions);
+	if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
+		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
+		goto error_exit;
+	}
+	return 0;
+error_exit:
+	/* Cleanup of partial objects is the caller's job (destroy_mtr_tbl). */
+	return -1;
+}
+
+/**
+ * Create the needed meter and suffix tables.
+ * Lock free, (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] fm
+ *   Pointer to the flow meter.
+ *
+ * @return
+ *   Pointer to table set on success, NULL otherwise.  NOTE(review):
+ *   rte_errno is set explicitly only on the !mtr_en path; presumably
+ *   rte_calloc sets it on allocation failure - confirm the other error
+ *   paths propagate an errno as callers expect.
+ */
+static struct mlx5_meter_domains_infos *
+flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
+		       const struct mlx5_flow_meter *fm)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_meter_domains_infos *mtb;
+	int ret;
+	int i;
+
+	if (!priv->mtr_en) {
+		rte_errno = ENOTSUP;
+		return NULL;
+	}
+	mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
+	if (!mtb) {
+		DRV_LOG(ERR, "Failed to allocate memory for meter.");
+		return NULL;
+	}
+	/*
+	 * Create meter count actions.  The actions are borrowed from the
+	 * policer statistics counters - mtb does not own them and
+	 * flow_dv_destroy_mtr_tbl() does not release them.
+	 */
+	for (i = 0; i <= RTE_MTR_DROPPED; i++) {
+		if (!fm->policer_stats.cnt[i])
+			continue;
+		mtb->count_actns[i] = fm->policer_stats.cnt[i]->action;
+	}
+	/* Create drop action. */
+	mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
+	if (!mtb->drop_actn) {
+		DRV_LOG(ERR, "Failed to create drop action.");
+		goto error_exit;
+	}
+	/* Egress meter table. */
+	ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to prepare egress meter table.");
+		goto error_exit;
+	}
+	/* Ingress meter table. */
+	ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to prepare ingress meter table.");
+		goto error_exit;
+	}
+	/* FDB meter table - only when E-Switch (dv_esw_en) is enabled. */
+	if (priv->config.dv_esw_en) {
+		ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
+						 priv->mtr_color_reg);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to prepare fdb meter table.");
+			goto error_exit;
+		}
+	}
+	return mtb;
+error_exit:
+	/* Releases everything created so far, including partial domains. */
+	flow_dv_destroy_mtr_tbl(dev, mtb);
+	return NULL;
+}
+
+/**
+ * Destroy domain policer rule.
+ *
+ * @param[in] dt
+ *   Pointer to domain table.
+ */
+static void
+flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
+{
+	int color;
+
+	/*
+	 * Release the per-color rules.  The default RTE_MTR_DROPPED rule
+	 * belongs to the table set and is released with it, not here.
+	 */
+	for (color = 0; color < RTE_MTR_DROPPED; color++) {
+		if (!dt->policer_rules[color])
+			continue;
+		claim_zero(mlx5_glue->dv_destroy_flow
+			   (dt->policer_rules[color]));
+		dt->policer_rules[color] = NULL;
+	}
+	/* Drop the jump-to-suffix-table action as well, if any. */
+	if (dt->jump_actn) {
+		claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
+		dt->jump_actn = NULL;
+	}
+}
+
+/**
+ * Destroy policer rules.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] fm
+ *   Pointer to flow meter structure.
+ * @param[in] attr
+ *   Pointer to flow attributes.
+ *
+ * @return
+ *   Always 0.
+ */
+static int
+flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
+			      const struct mlx5_flow_meter *fm,
+			      const struct rte_flow_attr *attr)
+{
+	struct mlx5_meter_domains_infos *mtb;
+
+	/* Nothing to do without a meter or its table set. */
+	if (!fm || !fm->mfts)
+		return 0;
+	mtb = fm->mfts;
+	/* Clean only the domains selected by the flow attributes. */
+	if (attr->egress)
+		flow_dv_destroy_domain_policer_rule(&mtb->egress);
+	if (attr->ingress)
+		flow_dv_destroy_domain_policer_rule(&mtb->ingress);
+	if (attr->transfer)
+		flow_dv_destroy_domain_policer_rule(&mtb->transfer);
+	return 0;
+}
+
+/**
+ * Create specify domain meter policer rule.
+ *
+ * @param[in] fm
+ *   Pointer to flow meter structure.
+ * @param[in] dtb
+ *   Pointer to the per-domain meter table info.
+ * @param[in] sfx_tb
+ *   Pointer to suffix table.
+ * @param[in] mtr_reg_c
+ *   Color match REG_C.
+ *
+ * @return
+ *   0 on success, -1 otherwise (rte_errno is set from errno on error).
+ */
+static int
+flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
+				    struct mlx5_meter_domain_info *dtb,
+				    struct mlx5_flow_tbl_resource *sfx_tb,
+				    uint8_t mtr_reg_c)
+{
+	/* Match params (named "matcher" here although it is not a matcher). */
+	struct mlx5_flow_dv_match_params matcher = {
+		.size = sizeof(matcher.buf),
+	};
+	struct mlx5_flow_dv_match_params value = {
+		.size = sizeof(value.buf),
+	};
+	struct mlx5_meter_domains_infos *mtb = fm->mfts;
+	void *actions[METER_ACTIONS];
+	int i;
+
+	/* Create jump action. */
+	if (!sfx_tb)
+		return -1;
+	/* Reuse the jump action if a previous call already created it. */
+	if (!dtb->jump_actn)
+		dtb->jump_actn =
+			mlx5_glue->dr_create_flow_action_dest_flow_tbl
+							(sfx_tb->obj);
+	if (!dtb->jump_actn) {
+		DRV_LOG(ERR, "Failed to create policer jump action.");
+		goto error;
+	}
+	/*
+	 * One rule per color, excluding RTE_MTR_DROPPED - the default
+	 * drop rule is created once with the table set.
+	 */
+	for (i = 0; i < RTE_MTR_DROPPED; i++) {
+		int j = 0;
+
+		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
+				       rte_col_2_mlx5_col(i), UINT32_MAX);
+		/* Optional counter, then either drop or jump to suffix. */
+		if (mtb->count_actns[i])
+			actions[j++] = mtb->count_actns[i];
+		if (fm->params.action[i] == MTR_POLICER_ACTION_DROP)
+			actions[j++] = mtb->drop_actn;
+		else
+			actions[j++] = dtb->jump_actn;
+		dtb->policer_rules[i] =
+			mlx5_glue->dv_create_flow(dtb->color_matcher,
+						 (void *)&value,
+						  j, actions);
+		if (!dtb->policer_rules[i]) {
+			DRV_LOG(ERR, "Failed to create policer rule.");
+			goto error;
+		}
+	}
+	return 0;
+error:
+	/* Glue calls report failures through errno. */
+	rte_errno = errno;
+	return -1;
+}
+
+/**
+ * Create policer rules.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] fm
+ *   Pointer to flow meter structure.
+ * @param[in] attr
+ *   Pointer to flow attributes.
+ *
+ * @return
+ *   0 on success, -1 otherwise.
+ */
+static int
+flow_dv_create_policer_rules(struct rte_eth_dev *dev,
+			     struct mlx5_flow_meter *fm,
+			     const struct rte_flow_attr *attr)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_meter_domains_infos *mtb = fm->mfts;
+
+	/* Install color/drop rules in every domain the flow targets. */
+	if (attr->egress &&
+	    flow_dv_create_policer_forward_rule(fm, &mtb->egress,
+						priv->sh->tx_mtr_sfx_tbl,
+						priv->mtr_color_reg)) {
+		DRV_LOG(ERR, "Failed to create egress policer.");
+		goto error;
+	}
+	if (attr->ingress &&
+	    flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
+						priv->sh->rx_mtr_sfx_tbl,
+						priv->mtr_color_reg)) {
+		DRV_LOG(ERR, "Failed to create ingress policer.");
+		goto error;
+	}
+	if (attr->transfer &&
+	    flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
+						priv->sh->fdb_mtr_sfx_tbl,
+						priv->mtr_color_reg)) {
+		DRV_LOG(ERR, "Failed to create transfer policer.");
+		goto error;
+	}
+	return 0;
+error:
+	/* Roll back the domains already populated above. */
+	flow_dv_destroy_policer_rules(dev, fm, attr);
+	return -1;
+}
+
+/**
+ * Query a devx counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] cnt
+ *   Pointer to the flow counter.
+ * @param[in] clear
+ *   Set to clear the counter statistics.
+ * @param[out] pkts
+ *   The statistics value of packets.
+ * @param[out] bytes
+ *   The statistics value of bytes.
+ *
+ * @return
+ *   0 on success, otherwise return -1.
+ */
+static int
+flow_dv_counter_query(struct rte_eth_dev *dev,
+		      struct mlx5_flow_counter *cnt, bool clear,
+		      uint64_t *pkts, uint64_t *bytes)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint64_t hw_pkts = 0;
+	uint64_t hw_bytes = 0;
+
+	/* Counters can only be read through devx. */
+	if (!priv->config.devx)
+		return -1;
+	if (_flow_dv_query_count(dev, cnt, &hw_pkts, &hw_bytes))
+		return -1;
+	/* Report the delta accumulated since the last clear. */
+	*pkts = hw_pkts - cnt->hits;
+	*bytes = hw_bytes - cnt->bytes;
+	if (clear) {
+		/* Remember the raw readings as the new baseline. */
+		cnt->hits = hw_pkts;
+		cnt->bytes = hw_bytes;
+	}
+	return 0;
+}
+
/*
* Mutex-protected thunk to lock-free __flow_dv_translate().
*/
flow_dv_shared_unlock(dev);
}
+/*
+ * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
+ * Serializes counter allocation with other shared flow operations.
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_allocate(struct rte_eth_dev *dev)
+{
+	struct mlx5_flow_counter *counter;
+
+	flow_dv_shared_lock(dev);
+	counter = flow_dv_counter_alloc(dev, 0, 0, 1);
+	flow_dv_shared_unlock(dev);
+	return counter;
+}
+
+/*
+ * Mutex-protected thunk to lock-free flow_dv_counter_release().
+ * Serializes counter release with other shared flow operations.
+ */
+static void
+flow_dv_counter_free(struct rte_eth_dev *dev,
+		     struct mlx5_flow_counter *counter)
+{
+	flow_dv_shared_lock(dev);
+	flow_dv_counter_release(dev, counter);
+	flow_dv_shared_unlock(dev);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.validate = flow_dv_validate,
.prepare = flow_dv_prepare,
.remove = flow_dv_remove,
.destroy = flow_dv_destroy,
.query = flow_dv_query,
+ .create_mtr_tbls = flow_dv_create_mtr_tbl,
+ .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
+ .create_policer_rules = flow_dv_create_policer_rules,
+ .destroy_policer_rules = flow_dv_destroy_policer_rules,
+ .counter_alloc = flow_dv_counter_allocate,
+ .counter_free = flow_dv_counter_free,
+ .counter_query = flow_dv_counter_query,
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */