#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
+#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
uint32_t rix_jump);
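+/**
+ * Get the E-Switch Manager vport id.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * The E-Switch Manager vport id.
+ */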
+static int16_t
+flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_common_device *cdev = priv->sh->cdev;
+
+ if (cdev->config.hca_attr.esw_mgr_vport_id_valid)
+ return (int16_t)cdev->config.hca_attr.esw_mgr_vport_id;
+
+ if (priv->pci_dev == NULL)
+ return 0;
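+ /* Fallback for older FW: on BlueField NICs the E-Switch Manager vport is the ECPF, 0xfffe. */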
+ switch (priv->pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
+ return (int16_t)0xfffe;
+ default:
+ return 0;
+ }
+}
+
/**
* Initialize flow attributes structure according to flow items' types.
*
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
case RTE_FLOW_ITEM_TYPE_GENEVE:
case RTE_FLOW_ITEM_TYPE_MPLS:
+ case RTE_FLOW_ITEM_TYPE_GTP:
if (tunnel_decap)
attr->attr = 0;
break;
{0, 0, 0},
};
-static const struct rte_flow_item *
-mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
-{
- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- switch (item->type) {
- default:
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- case RTE_FLOW_ITEM_TYPE_GRE:
- case RTE_FLOW_ITEM_TYPE_MPLS:
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- case RTE_FLOW_ITEM_TYPE_GENEVE:
- return item;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- case RTE_FLOW_ITEM_TYPE_IPV6:
- if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
- item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
- return item;
- break;
- }
- }
- return NULL;
-}
-
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
uint8_t next_protocol, uint64_t *item_flags,
mlx5_list_match_cb cb_match,
mlx5_list_remove_cb cb_remove,
mlx5_list_clone_cb cb_clone,
- mlx5_list_clone_free_cb cb_clone_free)
+ mlx5_list_clone_free_cb cb_clone_free,
+ struct rte_flow_error *error)
{
struct mlx5_hlist *hl;
struct mlx5_hlist *expected = NULL;
cb_clone_free);
if (!hl) {
DRV_LOG(ERR, "%s hash creation failed", name);
- rte_errno = ENOMEM;
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
return NULL;
}
if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
uint32_t reg_c0 = priv->sh->dv_regc0_mask;
MLX5_ASSERT(reg_c0);
- MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
+ MLX5_ASSERT(priv->sh->config.dv_xmeta_en !=
+ MLX5_XMETA_MODE_LEGACY);
if (conf->dst == REG_C_0) {
/* Copy to reg_c[0], within mask only. */
reg_dst.offset = rte_bsf32(reg_c0);
}
static int
-mlx5_flow_item_field_width(struct mlx5_priv *priv,
- enum rte_flow_field_id field)
+mlx5_flow_item_field_width(struct rte_eth_dev *dev,
+ enum rte_flow_field_id field, int inherit,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
switch (field) {
case RTE_FLOW_FIELD_START:
return 32;
case RTE_FLOW_FIELD_MARK:
return __builtin_popcount(priv->sh->dv_mark_mask);
case RTE_FLOW_FIELD_META:
- return __builtin_popcount(priv->sh->dv_meta_mask);
+ return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
+ __builtin_popcount(priv->sh->dv_meta_mask) : 32;
case RTE_FLOW_FIELD_POINTER:
case RTE_FLOW_FIELD_VALUE:
- return 64;
+ return inherit < 0 ? 0 : inherit;
+ case RTE_FLOW_FIELD_IPV4_ECN:
+ case RTE_FLOW_FIELD_IPV6_ECN:
+ return 2;
default:
MLX5_ASSERT(false);
}
static void
mlx5_flow_field_id_to_modify_info
(const struct rte_flow_action_modify_data *data,
- struct field_modify_info *info,
- uint32_t *mask, uint32_t *value,
- uint32_t width, uint32_t dst_width,
- uint32_t *shift, struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
+ struct field_modify_info *info, uint32_t *mask,
+ uint32_t width, struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t idx = 0;
uint32_t off = 0;
- uint64_t val = 0;
+
switch (data->field) {
case RTE_FLOW_FIELD_START:
/* not supported yet */
off = data->offset > 16 ? data->offset - 16 : 0;
if (mask) {
if (data->offset < 16) {
- info[idx] = (struct field_modify_info){2, 0,
+ info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_DMAC_15_0};
if (width < 16) {
- mask[idx] = rte_cpu_to_be_16(0xffff >>
+ mask[1] = rte_cpu_to_be_16(0xffff >>
(16 - width));
width = 0;
} else {
- mask[idx] = RTE_BE16(0xffff);
+ mask[1] = RTE_BE16(0xffff);
width -= 16;
}
if (!width)
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 4 * idx,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
- mask[idx] = rte_cpu_to_be_32((0xffffffff >>
- (32 - width)) << off);
+ mask[0] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
} else {
if (data->offset < 16)
info[idx++] = (struct field_modify_info){2, 0,
off = data->offset > 16 ? data->offset - 16 : 0;
if (mask) {
if (data->offset < 16) {
- info[idx] = (struct field_modify_info){2, 0,
+ info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_SMAC_15_0};
if (width < 16) {
- mask[idx] = rte_cpu_to_be_16(0xffff >>
+ mask[1] = rte_cpu_to_be_16(0xffff >>
(16 - width));
width = 0;
} else {
- mask[idx] = RTE_BE16(0xffff);
+ mask[1] = RTE_BE16(0xffff);
width -= 16;
}
if (!width)
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 4 * idx,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
- mask[idx] = rte_cpu_to_be_32((0xffffffff >>
- (32 - width)) << off);
+ mask[0] = rte_cpu_to_be_32((0xffffffff >>
+ (32 - width)) << off);
} else {
if (data->offset < 16)
info[idx++] = (struct field_modify_info){2, 0,
case RTE_FLOW_FIELD_IPV6_SRC:
if (mask) {
if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_SIPV6_31_0};
if (width < 32) {
- mask[idx] =
+ mask[3] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[3] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
++idx;
}
if (data->offset < 64) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_SIPV6_63_32};
if (width < 32) {
- mask[idx] =
+ mask[2] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[2] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
++idx;
}
if (data->offset < 96) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_SIPV6_95_64};
if (width < 32) {
- mask[idx] =
+ mask[1] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[1] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 4 * idx,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_127_96};
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
case RTE_FLOW_FIELD_IPV6_DST:
if (mask) {
if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_DIPV6_31_0};
if (width < 32) {
- mask[idx] =
+ mask[3] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[3] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
++idx;
}
if (data->offset < 64) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_DIPV6_63_32};
if (width < 32) {
- mask[idx] =
+ mask[2] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[2] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
++idx;
}
if (data->offset < 96) {
- info[idx] = (struct field_modify_info){4,
- 4 * idx,
+ info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_DIPV6_95_64};
if (width < 32) {
- mask[idx] =
+ mask[1] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
- mask[idx] = RTE_BE32(0xffffffff);
+ mask[1] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 4 * idx,
+ info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_127_96};
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
{
uint32_t meta_mask = priv->sh->dv_meta_mask;
uint32_t meta_count = __builtin_popcount(meta_mask);
- uint32_t msk_c0 =
- rte_cpu_to_be_32(priv->sh->dv_regc0_mask);
- uint32_t shl_c0 = rte_bsf32(msk_c0);
int reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return;
MLX5_ASSERT(reg != REG_NON);
MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
- if (reg == REG_C_0)
- *shift = shl_c0;
info[idx] = (struct field_modify_info){4, 0,
reg_to_field[reg]};
if (mask)
(meta_count - width)) & meta_mask);
}
break;
+ case RTE_FLOW_FIELD_IPV4_ECN:
+ case RTE_FLOW_FIELD_IPV6_ECN:
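+ /* ECN is the 2 least significant bits of the IP TOS / Traffic Class byte. */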
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_IP_ECN};
+ if (mask)
+ mask[idx] = 0x3 >> (2 - width);
+ break;
case RTE_FLOW_FIELD_POINTER:
case RTE_FLOW_FIELD_VALUE:
- if (data->field == RTE_FLOW_FIELD_POINTER)
- memcpy(&val, (void *)(uintptr_t)data->value,
- sizeof(uint64_t));
- else
- val = data->value;
- for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
- if (mask[idx]) {
- if (dst_width == 48) {
- /*special case for MAC addresses */
- value[idx] = rte_cpu_to_be_16(val);
- val >>= 16;
- dst_width -= 16;
- } else if (dst_width > 16) {
- value[idx] = rte_cpu_to_be_32(val);
- val >>= 32;
- } else if (dst_width > 8) {
- value[idx] = rte_cpu_to_be_16(val);
- val >>= 16;
- } else {
- value[idx] = (uint8_t)val;
- val >>= 8;
- }
- if (*shift)
- value[idx] <<= *shift;
- if (!val)
- break;
- }
- }
- break;
default:
MLX5_ASSERT(false);
break;
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_modify_field *conf =
(const struct rte_flow_action_modify_field *)(action->conf);
- struct rte_flow_item item;
+ struct rte_flow_item item = {
+ .spec = NULL,
+ .mask = NULL
+ };
struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
{0, 0, 0} };
struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
{0, 0, 0} };
uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
- uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
- uint32_t type;
- uint32_t shift = 0;
- uint32_t dst_width = mlx5_flow_item_field_width(priv, conf->dst.field);
+ uint32_t type, meta = 0;
if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
- conf->src.field == RTE_FLOW_FIELD_VALUE) {
+ conf->src.field == RTE_FLOW_FIELD_VALUE) {
type = MLX5_MODIFICATION_TYPE_SET;
/** For SET fill the destination field (field) first. */
mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
- value, conf->width, dst_width,
- &shift, dev, attr, error);
- /** Then copy immediate value from source as per mask. */
- mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
- value, conf->width, dst_width,
- &shift, dev, attr, error);
- item.spec = &value;
+ conf->width, dev,
+ attr, error);
+ item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
+ (void *)(uintptr_t)conf->src.pvalue :
+ (void *)(uintptr_t)&conf->src.value;
+ if (conf->dst.field == RTE_FLOW_FIELD_META) {
+ meta = *(const unaligned_uint32_t *)item.spec;
+ meta = rte_cpu_to_be_32(meta);
+ item.spec = &meta;
+ }
} else {
type = MLX5_MODIFICATION_TYPE_COPY;
/** For COPY fill the destination field (dcopy) without mask. */
mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
- value, conf->width, dst_width,
- &shift, dev, attr, error);
+ conf->width, dev,
+ attr, error);
/** Then construct the source field (field) with mask. */
mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
- value, conf->width, dst_width,
- &shift, dev, attr, error);
+ conf->width, dev,
+ attr, error);
}
item.mask = &mask;
return flow_dv_convert_modify_action(&item,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
const struct rte_flow_item_mark *spec = item->spec;
const struct rte_flow_item_mark *mask = item->mask;
const struct rte_flow_item_mark nic_mask = {
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
const struct rte_flow_item_meta *spec = item->spec;
const struct rte_flow_item_meta *mask = item->mask;
struct rte_flow_item_meta nic_mask = {
if (reg == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "unavalable extended metadata register");
+ "unavailable extended metadata register");
if (reg == REG_B)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
return ret;
if (!spec)
return 0;
+ if (spec->id == MLX5_PORT_ESW_MGR)
+ return 0;
esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
if (!esw_priv)
return rte_flow_error_set(error, rte_errno,
return 0;
}
+/**
+ * Validate represented port item.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ * Item specification.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_represented_port(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ const struct rte_flow_attr *attr,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ethdev *spec = item->spec;
+ const struct rte_flow_item_ethdev *mask = item->mask;
+ const struct rte_flow_item_ethdev switch_mask = {
+ .port_id = UINT16_MAX,
+ };
+ struct mlx5_priv *esw_priv;
+ struct mlx5_priv *dev_priv;
+ int ret;
+
+ if (!attr->transfer)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "match on port id is valid only when transfer flag is enabled");
+ if (item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple source ports are not supported");
+ if (!mask)
+ mask = &switch_mask;
+ if (mask->port_id != UINT16_MAX)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on \"id\" field");
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_ethdev_mask,
+ sizeof(struct rte_flow_item_ethdev),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
+ if (ret)
+ return ret;
+ if (!spec || spec->port_id == UINT16_MAX)
+ return 0;
+ esw_priv = mlx5_port_to_eswitch_info(spec->port_id, false);
+ if (!esw_priv)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
+ "failed to obtain E-Switch info for port");
+ dev_priv = mlx5_dev_to_eswitch_info(dev);
+ if (!dev_priv)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "failed to obtain E-Switch info");
+ if (esw_priv->domain_id != dev_priv->domain_id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
+ "cannot match on a port from a different E-Switch");
+ return 0;
+}
+
/**
* Validate VLAN item.
*
.teid = RTE_BE32(0xffffffff),
};
- if (!priv->config.hca_attr.tunnel_stateless_gtp)
+ if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_gtp)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"GTP support is not enabled");
{
const struct rte_flow_item_gtp *gtp_spec;
const struct rte_flow_item_gtp *gtp_mask;
- const struct rte_flow_item_gtp_psc *spec;
const struct rte_flow_item_gtp_psc *mask;
const struct rte_flow_item_gtp_psc nic_mask = {
- .pdu_type = 0xFF,
- .qfi = 0xFF,
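+ /* The GTP PSC extension header carries a 4-bit PDU type and a 6-bit QFI. */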
+ .hdr.type = 0xF,
+ .hdr.qfi = 0x3F,
};
if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
/* GTP spec is here and E flag is requested to match zero. */
if (!item->spec)
return 0;
- spec = item->spec;
mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
- if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
- "PDU type should be smaller than 16");
return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_gtp_psc),
{
int ret;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *last = item->last;
const struct rte_flow_item_ipv4 *mask = item->mask;
if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
- priv->config.hca_attr.inner_ipv4_ihl;
+ bool ihl_cap = !tunnel ?
+ attr->outer_ipv4_ihl : attr->inner_ipv4_ihl;
if (!ihl_cap)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
{
const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
const struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
- bool direction_error = false;
if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, port_id should "
"be after push VLAN");
- /* Push VLAN is not supported in ingress except for CX6 FDB mode. */
- if (attr->transfer) {
- bool fdb_tx = priv->representor_id != UINT16_MAX;
- bool is_cx5 = sh->steering_format_version ==
- MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
-
- if (!fdb_tx && is_cx5)
- direction_error = true;
- } else if (attr->ingress) {
- direction_error = true;
- }
- if (direction_error)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "push vlan action not supported for ingress");
if (!attr->transfer && priv->representor)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
int ret;
/* Fall back if no extended metadata register support. */
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
const struct rte_flow_action_mark *mark = action->conf;
int ret;
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_sh_config *config = &priv->sh->config;
const struct rte_flow_action_set_meta *conf;
uint32_t nic_mask = UINT32_MAX;
int reg;
- if (!mlx5_flow_ext_mreg_supported(dev))
+ if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ !mlx5_flow_ext_mreg_supported(dev))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"extended metadata register"
if (reg == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "unavalable extended metadata register");
+ "unavailable extended metadata register");
if (reg != REG_A && reg != REG_B) {
struct mlx5_priv *priv = dev->data->dev_private;
}
/**
- * Check if action counter is shared by either old or new mechanism.
+ * Indicates whether ASO aging is supported.
*
- * @param[in] action
- * Pointer to the action structure.
+ * @param[in] sh
+ * Pointer to shared device context structure.
+ * @param[in] attr
+ * Attributes of flow that includes AGE action.
*
* @return
- * True when counter is shared, false otherwise.
+ * True when ASO aging is supported, false otherwise.
*/
static inline bool
-is_shared_action_count(const struct rte_flow_action *action)
+flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,
+ const struct rte_flow_attr *attr)
{
- const struct rte_flow_action_count *count =
- (const struct rte_flow_action_count *)action->conf;
-
- if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
- return true;
- return !!(count && count->shared);
+ MLX5_ASSERT(sh && attr);
+ return (sh->flow_hit_aso_en && (attr->transfer || attr->group));
}
/**
* Indicator if action is shared.
* @param[in] action_flags
* Holds the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
* @param[out] error
* Pointer to error structure.
*
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
uint64_t action_flags,
+ const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (!priv->config.devx)
+ if (!priv->sh->cdev->config.devx)
goto notsup_err;
if (action_flags & MLX5_FLOW_ACTION_COUNT)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"duplicate count actions set");
if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
- !priv->sh->flow_hit_aso_en)
+ !flow_hit_aso_supported(priv->sh, attr))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "old age and shared count combination is not supported");
+ "old age and indirect count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
return 0;
#endif
{
const struct mlx5_priv *priv = dev->data->dev_private;
- if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
- !priv->config.decap_en)
+ if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
+ !priv->sh->config.decap_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"decap is not enabled");
}
*resource = *ctx_resource;
resource->idx = idx;
- ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
- resource,
+ ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
+ domain, resource,
&resource->action);
if (ret) {
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
flow_dv_encap_decap_match_cb,
flow_dv_encap_decap_remove_cb,
flow_dv_encap_decap_clone_cb,
- flow_dv_encap_decap_clone_free_cb);
+ flow_dv_encap_decap_clone_free_cb,
+ error);
if (unlikely(!encaps_decaps))
return -rte_errno;
resource->flags = dev_flow->dv.group ? 0 : 1;
* @return
* sizeof struct item_type, 0 if void or irrelevant.
*/
-static size_t
+size_t
flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
{
size_t retval;
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
size_t *size, struct rte_flow_error *error)
{
{
int ret = 0;
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
+ struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
const struct rte_flow_action_modify_field *action_modify_field =
action->conf;
- uint32_t dst_width = mlx5_flow_item_field_width(priv,
- action_modify_field->dst.field);
- uint32_t src_width = mlx5_flow_item_field_width(priv,
- action_modify_field->src.field);
+ uint32_t dst_width = mlx5_flow_item_field_width(dev,
+ action_modify_field->dst.field,
+ -1, attr, error);
+ uint32_t src_width = mlx5_flow_item_field_width(dev,
+ action_modify_field->src.field,
+ dst_width, attr, error);
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (ret)
"modifications of the GENEVE Network"
" Identifier is not supported");
if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
- action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
- action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
- action_modify_field->src.field == RTE_FLOW_FIELD_META) {
+ action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
!mlx5_flow_ext_mreg_supported(dev))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "cannot modify mark or metadata without"
- " extended metadata register support");
+ "cannot modify mark in legacy mode"
+ " or without extensive registers");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_META) {
+ if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ !mlx5_flow_ext_mreg_supported(dev))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "cannot modify meta without"
+ " extensive registers support");
+ ret = flow_dv_get_metadata_reg(dev, attr, error);
+ if (ret < 0 || ret == REG_NON)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "cannot modify meta without"
+ " extensive registers available");
}
if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"add and sub operations"
" are not supported");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_IPV4_ECN ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_IPV4_ECN ||
+ action_modify_field->dst.field == RTE_FLOW_FIELD_IPV6_ECN ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_IPV6_ECN)
+ if (!hca_attr->modify_outer_ip_ecn &&
+ !attr->transfer && !attr->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "modification of the ECN is not supported by current firmware");
return (action_modify_field->width / 32) +
!!(action_modify_field->width % 32);
}
const struct rte_flow_attr *attributes,
bool external, struct rte_flow_error *error)
{
- uint32_t target_group, table;
+ uint32_t target_group, table = 0;
int ret = 0;
struct flow_grp_info grp_info = {
.external = !!external,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"target group must be other than"
" the current flow group");
+ if (table == 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "root table shouldn't be destination");
return 0;
}
/*
- * Validate the port_id action.
+ * Validate action PORT_ID / REPRESENTED_PORT.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in] action_flags
* Bit-fields that holds the actions detected until now.
* @param[in] action
- * Port_id RTE action structure.
+ * PORT_ID / REPRESENTED_PORT action structure.
* @param[in] attr
* Attributes of flow that includes this action.
* @param[out] error
struct rte_flow_error *error)
{
const struct rte_flow_action_port_id *port_id;
+ const struct rte_flow_action_ethdev *ethdev;
struct mlx5_priv *act_priv;
struct mlx5_priv *dev_priv;
uint16_t port;
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
- "port id action is valid in transfer"
+ "port action is valid in transfer"
" mode only");
if (!action || !action->conf)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL,
- "port id action parameters must be"
+ "port action parameters must be"
" specified");
if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
MLX5_FLOW_FATE_ESWITCH_ACTIONS))
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"failed to obtain E-Switch info");
- port_id = action->conf;
- port = port_id->original ? dev->data->port_id : port_id->id;
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ port_id = action->conf;
+ port = port_id->original ? dev->data->port_id : port_id->id;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ ethdev = action->conf;
+ port = ethdev->port_id;
+ break;
+ default:
+ MLX5_ASSERT(false);
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "unknown E-Switch action");
+ }
act_priv = mlx5_port_to_eswitch_info(port, false);
if (!act_priv)
return rte_flow_error_set
(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
"failed to obtain E-Switch port id for port");
if (act_priv->domain_id != dev_priv->domain_id)
return rte_flow_error_set
* Pointer to rte_eth_dev structure.
* @param[in] action_flags
* Bit-fields that holds the actions detected until now.
+ * @param[in] item_flags
+ * Holds the items detected.
* @param[in] action
* Pointer to the meter action.
* @param[in] attr
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
- uint64_t action_flags,
+ uint64_t action_flags, uint64_t item_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
const struct rte_flow_item *port_id_item,
NULL,
"Flow and meter policy "
"have different src port.");
+ } else if (mtr_policy->is_rss) {
+ struct mlx5_flow_meter_policy *fp;
+ struct mlx5_meter_policy_action_container *acg;
+ struct mlx5_meter_policy_action_container *acy;
+ const struct rte_flow_action *rss_act;
+ int ret;
+
+ fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
+ mtr_policy);
+ if (fp == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Unable to get the final "
+ "policy in the hierarchy");
+ acg = &fp->act_cnt[RTE_COLOR_GREEN];
+ acy = &fp->act_cnt[RTE_COLOR_YELLOW];
+ MLX5_ASSERT(acg->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS ||
+ acy->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS);
+ if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
+ rss_act = acg->rss;
+ else
+ rss_act = acy->rss;
+ ret = mlx5_flow_validate_action_rss(rss_act,
+ action_flags, dev, attr,
+ item_flags, error);
+ if (ret)
+ return ret;
}
*def_policy = false;
}
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_age *age = action->conf;
- if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
- !priv->sh->aso_age_mng))
+ if (!priv->sh->cdev->config.devx ||
+ (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
.grow_trunk = 3,
.grow_shift = 2,
.need_lock = 1,
- .release_mem_en = !!sh->reclaim_mode,
- .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
+ .release_mem_en = !!sh->config.reclaim_mode,
+ .per_core_cache =
+ sh->config.reclaim_mode ? 0 : (1 << 16),
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_modify_action_resource",
else
ns = sh->rx_domain;
ret = mlx5_flow_os_create_flow_action_modify_header
- (sh->ctx, ns, entry,
+ (sh->cdev->ctx, ns, entry,
data_len, &entry->action);
if (ret) {
mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *dev_conf = &priv->config;
+ struct mlx5_sh_config *dev_conf = &priv->sh->config;
const struct rte_flow_action_sample *sample = action->conf;
const struct rte_flow_action *act;
uint64_t sub_action_flags = 0;
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"ratio value starts from 1");
- if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
+ if (!priv->sh->cdev->config.devx ||
+ (sample->ratio > 0 && !priv->sampler_en))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, jump should "
"be after sample action");
+ if (*action_flags & MLX5_FLOW_ACTION_CT)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Sample after CT not supported");
act = sample->actions;
for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_validate_action_count
- (dev, is_shared_action_count(act),
- *action_flags | sub_action_flags,
- error);
+ (dev, false, *action_flags | sub_action_flags,
+ attr, error);
if (ret < 0)
return ret;
*count = act->conf;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
ret = flow_dv_validate_action_port_id(dev,
sub_action_flags,
act,
NULL,
"E-Switch must has a dest "
"port for mirroring");
- if (!priv->config.hca_attr.reg_c_preserve &&
+ if (!priv->sh->cdev->config.hca_attr.reg_c_preserve &&
priv->representor_id != UINT16_MAX)
*fdb_mirror_limit = 1;
}
/* Continue validation for Xcap actions.*/
if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
- (queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index))) {
if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
flow_dv_modify_match_cb,
flow_dv_modify_remove_cb,
flow_dv_modify_clone_cb,
- flow_dv_modify_clone_free_cb);
+ flow_dv_modify_clone_free_cb,
+ error);
if (unlikely(!modify_cmds))
return -rte_errno;
resource->root = !dev_flow->dv.group;
if (fallback) {
/* bulk_bitmap must be 0 for single counter allocation. */
- dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
if (!dcs)
return NULL;
pool = flow_dv_find_pool_by_id(cmng, dcs->id);
*cnt_free = cnt;
return pool;
}
- dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
if (!dcs) {
rte_errno = ENODATA;
return NULL;
age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
uint32_t cnt_idx;
- if (!priv->config.devx) {
+ if (!priv->sh->cdev->config.devx) {
rte_errno = ENOTSUP;
return 0;
}
return 0;
}
-/**
- * Allocate a shared flow counter.
- *
- * @param[in] ctx
- * Pointer to the shared counter configuration.
- * @param[in] data
- * Pointer to save the allocated counter index.
- *
- * @return
- * Index to flow counter on success, 0 otherwise and rte_errno is set.
- */
-
-static int32_t
-flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
-{
- struct mlx5_shared_counter_conf *conf = ctx;
- struct rte_eth_dev *dev = conf->dev;
- struct mlx5_flow_counter *cnt;
-
- data->dword = flow_dv_counter_alloc(dev, 0);
- data->dword |= MLX5_CNT_SHARED_OFFSET;
- cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
- cnt->shared_info.id = conf->id;
- return 0;
-}
-
-/**
- * Get a shared flow counter.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] id
- * Counter identifier.
- *
- * @return
- * Index to flow counter on success, 0 otherwise and rte_errno is set.
- */
-static uint32_t
-flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_shared_counter_conf conf = {
- .dev = dev,
- .id = id,
- };
- union mlx5_l3t_data data = {
- .dword = 0,
- };
-
- mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
- flow_dv_counter_alloc_shared_cb, &conf);
- return data.dword;
-}
-
/**
* Get age param from counter index.
*
if (pool->is_aged) {
flow_dv_counter_remove_from_age(dev, counter, cnt);
} else {
- /*
- * If the counter action is shared by ID, the l3t_clear_entry
- * function reduces its references counter. If after the
- * reduction the action is still referenced, the function
- * returns here and does not release it.
- */
- if (IS_LEGACY_SHARED_CNT(counter) &&
- mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
- cnt->shared_info.id))
- return;
/*
* If the counter action is shared by indirect action API,
* the atomic function reduces its references counter.
* If after the reduction the action is still referenced, the
* function returns here and does not release it.
- * When the counter action is not shared neither by ID nor by
+ * When the counter action is not shared by
* indirect action API, shared info is 1 before the reduction,
* so this condition is failed and function doesn't return here.
*/
- if (!IS_LEGACY_SHARED_CNT(counter) &&
- __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
+ if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
__ATOMIC_RELAXED))
return;
}
* NULL otherwise and rte_errno is set.
*/
static struct mlx5_aso_mtr_pool *
-flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
- struct mlx5_aso_mtr **mtr_free)
+flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_aso_mtr_pools_mng *pools_mng =
- &priv->sh->mtrmng->pools_mng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
struct mlx5_aso_mtr_pool *pool = NULL;
struct mlx5_devx_obj *dcs = NULL;
uint32_t i;
uint32_t log_obj_size;
log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
- dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
- priv->sh->pdn, log_obj_size);
+ dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
+ priv->sh->cdev->pdn,
+ log_obj_size);
if (!dcs) {
rte_errno = ENODATA;
return NULL;
return NULL;
}
pool->devx_obj = dcs;
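+ /* Take the write lock: the pool array may be resized concurrently. */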
+ rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
pool->index = pools_mng->n_valid;
if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
mlx5_free(pool);
claim_zero(mlx5_devx_cmd_destroy(dcs));
+ rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
return NULL;
}
pools_mng->pools[pool->index] = pool;
pools_mng->n_valid++;
+ rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
pool->mtrs[i].offset = i;
- LIST_INSERT_HEAD(&pools_mng->meters,
- &pool->mtrs[i], next);
+ LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
}
pool->mtrs[0].offset = 0;
*mtr_free = &pool->mtrs[0];
struct mlx5_aso_mtr_pool *pool;
uint32_t mtr_idx = 0;
- if (!priv->config.devx) {
+ if (!priv->sh->cdev->config.devx) {
rte_errno = ENOTSUP;
return 0;
}
struct mlx5_aso_mtr_pool,
mtrs[mtr_free->offset]);
mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
- if (!mtr_free->fm.meter_action) {
+ if (!mtr_free->fm.meter_action_g) {
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
struct rte_flow_error error;
uint8_t reg_id;
reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
- mtr_free->fm.meter_action =
+ mtr_free->fm.meter_action_g =
mlx5_glue->dv_create_flow_action_aso
(priv->sh->rx_domain,
pool->devx_obj->obj,
(1 << MLX5_FLOW_COLOR_GREEN),
reg_id - REG_C_0);
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
- if (!mtr_free->fm.meter_action) {
+ if (!mtr_free->fm.meter_action_g) {
flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
return 0;
}
NULL,
"priority out of range");
if (attributes->transfer) {
- if (!priv->config.dv_esw_en)
+ if (!priv->sh->config.dv_esw_en)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"E-Switch dr is not supported");
- if (!(priv->representor || priv->master))
- return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "E-Switch configuration can only be"
- " done by a master or a representor device");
if (attributes->egress)
return rte_flow_error_set
(error, ENOTSUP,
return ret;
}
-static uint16_t
-mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
- const struct rte_flow_item *end)
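+/*
+ * Check that every integrity bit requested in the mask is backed by a
+ * matching protocol layer in the accumulated pattern flags.
+ */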
+static int
+validate_integrity_bits(const struct rte_flow_item_integrity *mask,
+ int64_t pattern_flags, uint64_t l3_flags,
+ uint64_t l4_flags, uint64_t ip4_flag,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item *item = *head;
- uint16_t l3_protocol;
+ if (mask->l3_ok && !(pattern_flags & l3_flags))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "missing L3 protocol");
- for (; item != end; item++) {
- switch (item->type) {
- default:
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- l3_protocol = RTE_ETHER_TYPE_IPV4;
- goto l3_ok;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- l3_protocol = RTE_ETHER_TYPE_IPV6;
- goto l3_ok;
- case RTE_FLOW_ITEM_TYPE_ETH:
- if (item->mask && item->spec) {
- MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
- type, item,
- l3_protocol);
- if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
- l3_protocol == RTE_ETHER_TYPE_IPV6)
- goto l3_ok;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- if (item->mask && item->spec) {
- MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
- inner_type, item,
- l3_protocol);
- if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
- l3_protocol == RTE_ETHER_TYPE_IPV6)
- goto l3_ok;
- }
- break;
- }
- }
- return 0;
-l3_ok:
- *head = item;
- return l3_protocol;
-}
+ if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "missing IPv4 protocol");
-static uint8_t
-mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
- const struct rte_flow_item *end)
-{
- const struct rte_flow_item *item = *head;
- uint8_t l4_protocol;
+ if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "missing L4 protocol");
- for (; item != end; item++) {
- switch (item->type) {
- default:
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- l4_protocol = IPPROTO_TCP;
- goto l4_ok;
- case RTE_FLOW_ITEM_TYPE_UDP:
- l4_protocol = IPPROTO_UDP;
- goto l4_ok;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- if (item->mask && item->spec) {
- const struct rte_flow_item_ipv4 *mask, *spec;
-
- mask = (typeof(mask))item->mask;
- spec = (typeof(spec))item->spec;
- l4_protocol = mask->hdr.next_proto_id &
- spec->hdr.next_proto_id;
- if (l4_protocol == IPPROTO_TCP ||
- l4_protocol == IPPROTO_UDP)
- goto l4_ok;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- if (item->mask && item->spec) {
- const struct rte_flow_item_ipv6 *mask, *spec;
- mask = (typeof(mask))item->mask;
- spec = (typeof(spec))item->spec;
- l4_protocol = mask->hdr.proto & spec->hdr.proto;
- if (l4_protocol == IPPROTO_TCP ||
- l4_protocol == IPPROTO_UDP)
- goto l4_ok;
- }
- break;
- }
- }
return 0;
-l4_ok:
- *head = item;
- return l4_protocol;
}
static int
-flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
- const struct rte_flow_item *rule_items,
- const struct rte_flow_item *integrity_item,
+flow_dv_validate_item_integrity_post(const struct
+ rte_flow_item *integrity_items[2],
+ int64_t pattern_flags,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_integrity *mask;
+ int ret;
+
+ if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
+ mask = (typeof(mask))integrity_items[0]->mask;
+ ret = validate_integrity_bits(mask, pattern_flags,
+ MLX5_FLOW_LAYER_OUTER_L3,
+ MLX5_FLOW_LAYER_OUTER_L4,
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4,
+ error);
+ if (ret)
+ return ret;
+ }
+ if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
+ mask = (typeof(mask))integrity_items[1]->mask;
+ ret = validate_integrity_bits(mask, pattern_flags,
+ MLX5_FLOW_LAYER_INNER_L3,
+ MLX5_FLOW_LAYER_INNER_L4,
+ MLX5_FLOW_LAYER_INNER_L3_IPV4,
+ error);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
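+ /* Convert the immediate META value to big-endian as expected by the device. */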
+static int
+flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
+ const struct rte_flow_item *integrity_item,
+ uint64_t pattern_flags, uint64_t *last_item,
+ const struct rte_flow_item *integrity_items[2],
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
const struct rte_flow_item_integrity *mask = (typeof(mask))
integrity_item->mask;
const struct rte_flow_item_integrity *spec = (typeof(spec))
integrity_item->spec;
- uint32_t protocol;
- if (!priv->config.hca_attr.pkt_integrity_match)
+ if (!priv->sh->cdev->config.hca_attr.pkt_integrity_match)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
integrity_item,
"packet integrity integrity_item not supported");
+ if (!spec)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "no spec for integrity item");
if (!mask)
mask = &rte_flow_item_integrity_mask;
if (!mlx5_validate_integrity_item(mask))
RTE_FLOW_ERROR_TYPE_ITEM,
integrity_item,
"unsupported integrity filter");
- tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
if (spec->level > 1) {
- if (!tunnel_item)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- integrity_item,
- "missing tunnel item");
- item = tunnel_item;
- end_item = mlx5_find_end_item(tunnel_item);
+ if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple inner integrity items not supported");
+ integrity_items[1] = integrity_item;
+ *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
} else {
- end_item = tunnel_item ? tunnel_item :
- mlx5_find_end_item(integrity_item);
- }
- if (mask->l3_ok || mask->ipv4_csum_ok) {
- protocol = mlx5_flow_locate_proto_l3(&item, end_item);
- if (!protocol)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- integrity_item,
- "missing L3 protocol");
+ if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple outer integrity items not supported");
+ integrity_items[0] = integrity_item;
+ *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
}
- if (mask->l4_ok || mask->l4_csum_ok) {
- protocol = mlx5_flow_locate_proto_l4(&item, end_item);
- if (!protocol)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- integrity_item,
- "missing L4 protocol");
+ return 0;
+}
+
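+/*
+ * Validate the flex item spec, mask and handle, and classify the item as
+ * inner, outer or tunnel according to the flex item tunnel mode.
+ */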
+static int
+flow_dv_validate_item_flex(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint64_t *last_item,
+ bool is_inner,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_flex *flow_spec = item->spec;
+ const struct rte_flow_item_flex *flow_mask = item->mask;
+ struct mlx5_flex_item *flex;
+
+ if (!flow_spec)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex flow item spec cannot be NULL");
+ if (!flow_mask)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex flow item mask cannot be NULL");
+ if (item->last)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex flow item last not supported");
+ if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid flex flow item handle");
+ flex = (struct mlx5_flex_item *)flow_spec->handle;
+ switch (flex->tunnel_mode) {
+ case FLEX_TUNNEL_MODE_SINGLE:
+ if (item_flags &
+ (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex items not supported");
+ break;
+ case FLEX_TUNNEL_MODE_OUTER:
+ if (is_inner)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "inner flex item was not configured");
+ if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex items not supported");
+ break;
+ case FLEX_TUNNEL_MODE_INNER:
+ if (!is_inner)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "outer flex item was not configured");
+ if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex items not supported");
+ break;
+ case FLEX_TUNNEL_MODE_MULTI:
+ if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
+ (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex items not supported");
+ break;
+ case FLEX_TUNNEL_MODE_TUNNEL:
+ if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex tunnel items not supported");
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "invalid flex item configuration");
+ }
+ *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
+ MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
+ MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
return 0;
}
bool external, int hairpin, struct rte_flow_error *error)
{
int ret;
- uint64_t action_flags = 0;
+ uint64_t aso_mask, action_flags = 0;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint8_t next_protocol = 0xff;
},
};
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *dev_conf = &priv->config;
+ struct mlx5_sh_config *dev_conf = &priv->sh->config;
uint16_t queue_index = 0xFFFF;
const struct rte_flow_item_vlan *vlan_m = NULL;
uint32_t rw_act_num = 0;
.std_tbl_fix = true,
};
const struct rte_eth_hairpin_conf *conf;
- const struct rte_flow_item *rule_items = items;
+ const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
const struct rte_flow_item *port_id_item = NULL;
bool def_policy = false;
+ bool shared_count = false;
uint16_t udp_dport = 0;
+ uint32_t tag_id = 0;
+ const struct rte_flow_action_age *non_shared_age = NULL;
+ const struct rte_flow_action_count *count = NULL;
if (items == NULL)
return -1;
tunnel = is_tunnel_offload_active(dev) ?
mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
if (tunnel) {
+ if (!dev_conf->dv_flow_en)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "tunnel offload requires DV flow interface");
if (priv->representor)
return rte_flow_error_set
(error, ENOTSUP,
switch (type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ ret = mlx5_flow_os_validate_item_esp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_ESP;
+ break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
ret = flow_dv_validate_item_port_id
(dev, items, attr, item_flags, error);
last_item = MLX5_FLOW_ITEM_PORT_ID;
port_id_item = items;
break;
+ case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
+ ret = flow_dv_validate_item_represented_port
+ (dev, items, attr, item_flags, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
+ break;
case RTE_FLOW_ITEM_TYPE_ETH:
ret = mlx5_flow_validate_item_eth(items, item_flags,
true, error);
gre_item = items;
last_item = MLX5_FLOW_LAYER_GRE;
break;
+ case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
+ ret = mlx5_flow_validate_item_gre_option(dev, items, item_flags,
+ attr, gre_item, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GRE;
+ break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
ret = mlx5_flow_validate_item_nvgre(items, item_flags,
next_protocol,
return ret;
last_item = MLX5_FLOW_ITEM_TAG;
break;
- case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ last_item = MLX5_FLOW_ITEM_TX_QUEUE;
+ break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
break;
case RTE_FLOW_ITEM_TYPE_GTP:
ret = flow_dv_validate_item_gtp(dev, items, item_flags,
last_item = MLX5_FLOW_LAYER_ECPRI;
break;
case RTE_FLOW_ITEM_TYPE_INTEGRITY:
- if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL, "multiple integrity items not supported");
- ret = flow_dv_validate_item_integrity(dev, rule_items,
- items, error);
+ ret = flow_dv_validate_item_integrity(dev, items,
+ item_flags,
+ &last_item,
+ integrity_items,
+ error);
if (ret < 0)
return ret;
- last_item = MLX5_FLOW_ITEM_INTEGRITY;
break;
case RTE_FLOW_ITEM_TYPE_CONNTRACK:
ret = flow_dv_validate_item_aso_ct(dev, items,
* list it here as a supported type
*/
break;
+ case RTE_FLOW_ITEM_TYPE_FLEX:
+ ret = flow_dv_validate_item_flex(dev, items, item_flags,
+ &last_item,
+ tunnel != 0, error);
+ if (ret < 0)
+ return ret;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
}
item_flags |= last_item;
}
+ if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
+ ret = flow_dv_validate_item_integrity_post(integrity_items,
+ item_flags, error);
+ if (ret)
+ return ret;
+ }
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
int type = actions->type;
- bool shared_count = false;
if (!mlx5_flow_os_action_supported(type))
return rte_flow_error_set(error, ENOTSUP,
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
ret = flow_dv_validate_action_port_id(dev,
action_flags,
actions,
++actions_n;
if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
modify_after_mirror = 1;
+ tag_id = ((const struct rte_flow_action_set_tag *)
+ actions->conf)->index;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
++actions_n;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
+ shared_count = true;
+ /* fall-through. */
case RTE_FLOW_ACTION_TYPE_COUNT:
- shared_count = is_shared_action_count(actions);
ret = flow_dv_validate_action_count(dev, shared_count,
action_flags,
- error);
+ attr, error);
if (ret < 0)
return ret;
+ count = actions->conf;
action_flags |= MLX5_FLOW_ACTION_COUNT;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_METER:
ret = mlx5_flow_validate_action_meter(dev,
action_flags,
+ item_flags,
actions, attr,
port_id_item,
&def_policy,
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
+ non_shared_age = actions->conf;
ret = flow_dv_validate_action_age(action_flags,
actions, dev,
error);
return ret;
/*
* Validate the regular AGE action (using counter)
- * mutual exclusion with share counter actions.
+ * mutual exclusion with indirect counter actions.
*/
- if (!priv->sh->flow_hit_aso_en) {
+ if (!flow_hit_aso_supported(priv->sh, attr)) {
if (shared_count)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
- "old age and shared count combination is not supported");
+ "old age and indirect count combination is not supported");
if (sample_count)
return rte_flow_error_set
(error, EINVAL,
error);
if (ret < 0)
return ret;
+ if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) &&
+ tag_id == 0 && priv->mtr_color_reg == REG_NON)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "sample after tag action causes metadata tag index 0 corruption");
action_flags |= MLX5_FLOW_ACTION_SAMPLE;
++actions_n;
break;
* - Explicit decap action is prohibited by the tunnel offload API.
* - Drop action in tunnel steer rule is prohibited by the API.
* - Application cannot use MARK action because it's value can mask
- * tunnel default miss nitification.
+ * tunnel default miss notification.
* - JUMP in tunnel match rule has no support in current PMD
* implementation.
* - TAG & META are reserved for future uses.
*/
if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
MLX5_FLOW_VLAN_ACTIONS)) &&
- (queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
+ (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index) ||
((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
conf->tx_explicit != 0))) {
if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "encap and decap "
"combination aren't supported");
+ /* Push VLAN is not supported in ingress except for NICs newer than CX5. */
+ if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ bool direction_error = false;
+
+ if (attr->transfer) {
+ bool fdb_tx = priv->representor_id != UINT16_MAX;
+ bool is_cx5 = sh->steering_format_version ==
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
+
+ if (!fdb_tx && is_cx5)
+ direction_error = true;
+ } else if (attr->ingress) {
+ direction_error = true;
+ }
+ if (direction_error)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "push VLAN action not supported "
+ "for ingress");
+ }
if (!attr->transfer && attr->ingress) {
if (action_flags & MLX5_FLOW_ACTION_ENCAP)
return rte_flow_error_set
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "encap is not supported"
" for ingress traffic");
- else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "push VLAN action not "
- "supported for ingress");
else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
MLX5_FLOW_VLAN_ACTIONS)
return rte_flow_error_set
"cannot be done before meter action");
}
}
+ /*
+ * Only one ASO action is supported in a single flow rule.
+ * A non-shared AGE combined with a counter falls back to the HW counter, with no ASO hit object.
+ * Group 0 uses the HW counter for AGE as well, even without a counter action.
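+ * aso_mask bit layout below: bit 2 - METER, bit 1 - CT, bit 0 - AGE.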
+ */
+ aso_mask = (action_flags & MLX5_FLOW_ACTION_METER && priv->sh->meter_aso_en) << 2 |
+ (action_flags & MLX5_FLOW_ACTION_CT && priv->sh->ct_aso_en) << 1 |
+ (action_flags & MLX5_FLOW_ACTION_AGE &&
+ !(non_shared_age && count) &&
+ (attr->group || (attr->transfer && priv->fdb_def_rule)) &&
+ priv->sh->flow_hit_aso_en);
+ if (__builtin_popcountl(aso_mask) > 1)
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "unsupported combining AGE, METER, CT ASO actions in a single rule");
/*
* Hairpin flow will add one more TAG action in TX implicit mode.
* In TX explicit mode, there will be no hairpin flow ID.
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"sample before modify action is not supported");
+ /*
+ * Validate the NIC egress flow on a representor. Except for the
+ * implicit hairpin default egress flow with the TX_QUEUE item,
+ * other flows do not work due to metadata regC0 mismatch.
+ */
+ if ((!attr->transfer && attr->egress) && priv->representor &&
+ !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "NIC egress rules on representors"
+ " is not supported");
return 0;
}
(tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
}
+/**
+ * Add ESP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_esp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_esp *esp_m = item->mask;
+ const struct rte_flow_item_esp *esp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ char *spi_m;
+ char *spi_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP);
+ if (!esp_v)
+ return;
+ if (!esp_m)
+ esp_m = &rte_flow_item_esp_mask;
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ if (inner) {
+ spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, inner_esp_spi);
+ spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi);
+ } else {
+ spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, outer_esp_spi);
+ spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi);
+ }
+ *(uint32_t *)spi_m = esp_m->hdr.spi;
+ *(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi;
+}
+
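As a usage sketch (not part of the patch), an application requests the ESP matching implemented above with a pattern such as the following; the SPI value is illustrative:

	struct rte_flow_item_esp esp_spec = {
		.hdr = { .spi = RTE_BE32(0x1000) },	/* illustrative SPI */
	};
	struct rte_flow_item_esp esp_mask = {
		.hdr = { .spi = RTE_BE32(UINT32_MAX) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP,
		  .spec = &esp_spec, .mask = &esp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
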
/**
* Add UDP item to matcher and to the value.
*
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] inner
- * Item is inner pattern.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
*/
static void
flow_dv_translate_item_gre(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ uint64_t pattern_flags)
{
+ static const struct rte_flow_item_gre empty_gre = {0,};
const struct rte_flow_item_gre *gre_m = item->mask;
const struct rte_flow_item_gre *gre_v = item->spec;
- void *headers_m;
- void *headers_v;
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
struct {
uint16_t value;
};
} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
+ uint16_t protocol_m, protocol_v;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
- if (!gre_v)
- return;
- if (!gre_m)
- gre_m = &rte_flow_item_gre_mask;
- MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
- rte_be_to_cpu_16(gre_m->protocol));
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
+ if (!gre_v) {
+ gre_v = &empty_gre;
+ gre_m = &empty_gre;
+ } else {
+ if (!gre_m)
+ gre_m = &rte_flow_item_gre_mask;
+ }
gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
gre_crks_rsvd0_ver_v.s_present &
gre_crks_rsvd0_ver_m.s_present);
+ protocol_m = rte_be_to_cpu_16(gre_m->protocol);
+ protocol_v = rte_be_to_cpu_16(gre_v->protocol);
+ if (!protocol_m) {
+ /* Force the next protocol to prevent matcher duplication */
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+ if (protocol_v)
+ protocol_m = 0xFFFF;
+ }
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ protocol_m & protocol_v);
+}
+
+/**
+ * Add GRE optional items to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] gre_item
+ * Pointer to gre_item.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
+ */
+static void
+flow_dv_translate_item_gre_option(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ const struct rte_flow_item *gre_item,
+ uint64_t pattern_flags)
+{
+ const struct rte_flow_item_gre_opt *option_m = item->mask;
+ const struct rte_flow_item_gre_opt *option_v = item->spec;
+ const struct rte_flow_item_gre *gre_m = gre_item->mask;
+ const struct rte_flow_item_gre *gre_v = gre_item->spec;
+ static const struct rte_flow_item_gre empty_gre = {0};
+ struct rte_flow_item gre_key_item;
+ uint16_t c_rsvd0_ver_m, c_rsvd0_ver_v;
+ uint16_t protocol_m, protocol_v;
+ void *misc5_m;
+ void *misc5_v;
+
+ /*
+ * If only the key field is matched, keep using misc for matching.
+ * If checksum or sequence matching is needed, use misc5 instead;
+ * misc is not needed in that case.
+ */
+ if (!(option_m->sequence.sequence ||
+ option_m->checksum_rsvd.checksum)) {
+ flow_dv_translate_item_gre(matcher, key, gre_item,
+ pattern_flags);
+ gre_key_item.spec = &option_v->key.key;
+ gre_key_item.mask = &option_m->key.key;
+ flow_dv_translate_item_gre_key(matcher, key, &gre_key_item);
+ return;
+ }
+ if (!gre_v) {
+ gre_v = &empty_gre;
+ gre_m = &empty_gre;
+ } else {
+ if (!gre_m)
+ gre_m = &rte_flow_item_gre_mask;
+ }
+ protocol_v = gre_v->protocol;
+ protocol_m = gre_m->protocol;
+ if (!protocol_m) {
+ /* Force the next protocol to prevent matcher duplication */
+ uint16_t ether_type =
+ mlx5_translate_tunnel_etypes(pattern_flags);
+ if (ether_type) {
+ protocol_v = rte_be_to_cpu_16(ether_type);
+ protocol_m = UINT16_MAX;
+ }
+ }
+ c_rsvd0_ver_v = gre_v->c_rsvd0_ver;
+ c_rsvd0_ver_m = gre_m->c_rsvd0_ver;
+ if (option_m->sequence.sequence) {
+ c_rsvd0_ver_v |= RTE_BE16(0x1000);
+ c_rsvd0_ver_m |= RTE_BE16(0x1000);
+ }
+ if (option_m->key.key) {
+ c_rsvd0_ver_v |= RTE_BE16(0x2000);
+ c_rsvd0_ver_m |= RTE_BE16(0x2000);
+ }
+ if (option_m->checksum_rsvd.checksum) {
+ c_rsvd0_ver_v |= RTE_BE16(0x8000);
+ c_rsvd0_ver_m |= RTE_BE16(0x8000);
+ }
+ /*
+ * Hardware parses the GRE optional fields into fixed locations,
+ * so there is no need to adjust the tunnel dword indices.
+ */
+ misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
+ misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
+ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0,
+ rte_be_to_cpu_32((c_rsvd0_ver_v | protocol_v << 16) &
+ (c_rsvd0_ver_m | protocol_m << 16)));
+ MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_0,
+ rte_be_to_cpu_32(c_rsvd0_ver_m | protocol_m << 16));
+ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_1,
+ rte_be_to_cpu_32(option_v->checksum_rsvd.checksum &
+ option_m->checksum_rsvd.checksum));
+ MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_1,
+ rte_be_to_cpu_32(option_m->checksum_rsvd.checksum));
+ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_2,
+ rte_be_to_cpu_32(option_v->key.key & option_m->key.key));
+ MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_2,
+ rte_be_to_cpu_32(option_m->key.key));
+ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_3,
+ rte_be_to_cpu_32(option_v->sequence.sequence &
+ option_m->sequence.sequence));
+ MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_3,
+ rte_be_to_cpu_32(option_m->sequence.sequence));
}
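
A usage sketch of the new item (key value illustrative): GRE_OPTION must follow a GRE item, and because only the key field is masked here, the translation keeps the misc path; masking checksum or sequence instead would take the misc5 path described above.

	struct rte_flow_item_gre_opt opt_spec = {
		.key = { .key = RTE_BE32(0x22) },
	};
	struct rte_flow_item_gre_opt opt_mask = {
		.key = { .key = RTE_BE32(UINT32_MAX) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_GRE },
		{ .type = RTE_FLOW_ITEM_TYPE_GRE_OPTION,
		  .spec = &opt_spec, .mask = &opt_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
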
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] inner
- * Item is inner pattern.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
*/
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ unsigned long pattern_flags)
{
const struct rte_flow_item_nvgre *nvgre_m = item->mask;
const struct rte_flow_item_nvgre *nvgre_v = item->spec;
.mask = &gre_mask,
.last = NULL,
};
- flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
+ flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
if (!nvgre_v)
return;
if (!nvgre_m)
static void
flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item,
+ const uint64_t pattern_flags)
{
+ static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
- void *headers_m;
- void *headers_v;
+ /* The item was validated to be on the outer side */
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m =
MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
void *misc_v =
MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
- char *vni_m;
- char *vni_v;
- uint16_t dport;
- int size;
- int i;
+ char *vni_m =
+ MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+ char *vni_v =
+ MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+ int i, size = sizeof(vxlan_m->vni);
uint8_t flags_m = 0xff;
uint8_t flags_v = 0xc;
+ uint8_t m_protocol, v_protocol;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
- dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
- MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_VXLAN_GPE);
+ }
+ if (!vxlan_v) {
+ vxlan_v = &dummy_vxlan_gpe_hdr;
+ vxlan_m = &dummy_vxlan_gpe_hdr;
+ } else {
+ if (!vxlan_m)
+ vxlan_m = &rte_flow_item_vxlan_gpe_mask;
}
- if (!vxlan_v)
- return;
- if (!vxlan_m)
- vxlan_m = &rte_flow_item_vxlan_gpe_mask;
- size = sizeof(vxlan_m->vni);
- vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
- vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
memcpy(vni_m, vxlan_m->vni, size);
for (i = 0; i < size; ++i)
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
- MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
- vxlan_m->protocol);
- MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
- vxlan_v->protocol);
+ m_protocol = vxlan_m->protocol;
+ v_protocol = vxlan_v->protocol;
+ if (!m_protocol) {
+ /* Force the next protocol to ensure the next headers are parsed. */
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+ if (v_protocol)
+ m_protocol = 0xFF;
+ }
+ MLX5_SET(fte_match_set_misc3, misc_m,
+ outer_vxlan_gpe_next_protocol, m_protocol);
+ MLX5_SET(fte_match_set_misc3, misc_v,
+ outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
}
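
For illustration, a pattern that relies on the protocol forcing above: the VXLAN-GPE item carries no spec/mask, so the inner IPv4 item makes the translation set next_protocol to RTE_VXLAN_GPE_TYPE_IPV4 with a full mask.

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE },	/* no spec/mask */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },		/* inner L3 */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
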
/**
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item,
+ uint64_t pattern_flags)
{
+ static const struct rte_flow_item_geneve empty_geneve = {0,};
const struct rte_flow_item_geneve *geneve_m = item->mask;
const struct rte_flow_item_geneve *geneve_v = item->spec;
- void *headers_m;
- void *headers_v;
+ /* GENEVE flow item validation allows a single tunnel item */
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- uint16_t dport;
uint16_t gbhdr_m;
uint16_t gbhdr_v;
- char *vni_m;
- char *vni_v;
- size_t size, i;
+ char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+ char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+ size_t size = sizeof(geneve_m->vni), i;
+ uint16_t protocol_m, protocol_v;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
- dport = MLX5_UDP_PORT_GENEVE;
if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_GENEVE);
+ }
+ if (!geneve_v) {
+ geneve_v = &empty_geneve;
+ geneve_m = &empty_geneve;
+ } else {
+ if (!geneve_m)
+ geneve_m = &rte_flow_item_geneve_mask;
}
- if (!geneve_v)
- return;
- if (!geneve_m)
- geneve_m = &rte_flow_item_geneve_mask;
- size = sizeof(geneve_m->vni);
- vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
- vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
memcpy(vni_m, geneve_m->vni, size);
for (i = 0; i < size; ++i)
vni_v[i] = vni_m[i] & geneve_v->vni[i];
- MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_m->protocol));
- MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+ protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
+ protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
+ if (!protocol_m) {
+ /* Force the next protocol to prevent matcher duplication */
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+ if (protocol_v)
+ protocol_m = 0xFFFF;
+ }
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+ protocol_m & protocol_v);
}
/**
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- /* We already have GENVE TLV option obj allocated. */
+ /* We already have GENEVE TLV option obj allocated. */
__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
__ATOMIC_RELAXED);
} else {
}
} else {
/* Create a GENEVE TLV object and resource. */
- obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
+ obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
geneve_opt_v->option_class,
geneve_opt_v->option_type,
geneve_opt_v->option_len);
MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
geneve_opt_v->option_len + 1);
}
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
/* Set the data. */
if (geneve_opt_v->data) {
memcpy(&opt_data_key, geneve_opt_v->data,
switch (prev_layer) {
case MLX5_FLOW_LAYER_OUTER_L4_UDP:
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
- MLX5_UDP_PORT_MPLS);
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_MPLS);
+ }
break;
case MLX5_FLOW_LAYER_GRE:
/* Fall-through. */
case MLX5_FLOW_LAYER_GRE_KEY:
- MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- RTE_ETHER_TYPE_MPLS);
+ if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ RTE_ETHER_TYPE_MPLS);
+ }
break;
default:
break;
struct mlx5_priv *priv;
uint16_t mask, id;
+ if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
+ flow_dv_translate_item_source_vport(matcher, key,
+ flow_dv_get_esw_manager_vport_id(dev), 0xffff);
+ return 0;
+ }
mask = pid_m ? pid_m->id : 0xffff;
id = pid_v ? pid_v->id : dev->data->port_id;
priv = mlx5_port_to_eswitch_info(id, item == NULL);
return 0;
}
+/**
+ * Translate represented port item to eswitch match on port id.
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] attr
+ * Flow attributes.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+flow_dv_translate_item_represented_port(struct rte_eth_dev *dev, void *matcher,
+ void *key,
+ const struct rte_flow_item *item,
+ const struct rte_flow_attr *attr)
+{
+ const struct rte_flow_item_ethdev *pid_m = item ? item->mask : NULL;
+ const struct rte_flow_item_ethdev *pid_v = item ? item->spec : NULL;
+ struct mlx5_priv *priv;
+ uint16_t mask, id;
+
+ if (!pid_m && !pid_v)
+ return 0;
+ if (pid_v && pid_v->port_id == UINT16_MAX) {
+ flow_dv_translate_item_source_vport(matcher, key,
+ flow_dv_get_esw_manager_vport_id(dev), UINT16_MAX);
+ return 0;
+ }
+ mask = pid_m ? pid_m->port_id : UINT16_MAX;
+ id = pid_v ? pid_v->port_id : dev->data->port_id;
+ priv = mlx5_port_to_eswitch_info(id, item == NULL);
+ if (!priv)
+ return -rte_errno;
+ /*
+ * Translate to vport field or to metadata, depending on mode.
+ * Kernel can use either misc.source_port or half of C0 metadata
+ * register.
+ */
+ if (priv->vport_meta_mask) {
+ /*
+ * Provide the hint for SW steering library
+ * to insert the flow into ingress domain and
+ * save the extra vport match.
+ */
+ if (mask == UINT16_MAX && priv->vport_id == UINT16_MAX &&
+ priv->pf_bond < 0 && attr->transfer)
+ flow_dv_translate_item_source_vport
+ (matcher, key, priv->vport_id, mask);
+ /*
+ * We should always set the vport metadata register,
+ * otherwise the SW steering library can drop
+ * the rule if wire vport metadata value is not zero,
+ * it depends on kernel configuration.
+ */
+ flow_dv_translate_item_meta_vport(matcher, key,
+ priv->vport_meta_tag,
+ priv->vport_meta_mask);
+ } else {
+ flow_dv_translate_item_source_vport(matcher, key,
+ priv->vport_id, mask);
+ }
+ return 0;
+}
+
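A hedged application-side sketch of the new item (port number illustrative); the attribute must have transfer set for E-Switch matching, and a port_id of UINT16_MAX selects the E-Switch manager as handled above:

	struct rte_flow_item_ethdev port_spec = { .port_id = 1 };
	struct rte_flow_attr example_attr = { .transfer = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
		  .spec = &port_spec,
		  .mask = &rte_flow_item_ethdev_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
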
/**
* Add ICMP6 item to matcher and to the value.
*
if (!gtp_psc_m)
gtp_psc_m = &rte_flow_item_gtp_psc_mask;
dw_0.w32 = 0;
- dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
- dw_0.qfi = gtp_psc_m->qfi;
+ dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
+ dw_0.qfi = gtp_psc_m->hdr.qfi;
MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
rte_cpu_to_be_32(dw_0.w32));
dw_0.w32 = 0;
- dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
- gtp_psc_m->pdu_type);
- dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
+ dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
+ gtp_psc_m->hdr.type);
+ dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
rte_cpu_to_be_32(dw_0.w32));
}
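
After the API change from the flat pdu_type/qfi fields to the hdr structure, a matching item looks like this sketch (type and QFI values illustrative):

	struct rte_flow_item_gtp_psc psc_spec = {
		.hdr = { .type = 1, .qfi = 9 },
	};
	struct rte_flow_item_gtp_psc psc_mask = {
		.hdr = { .type = 0xf, .qfi = 0x3f },	/* 4-bit type, 6-bit QFI */
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
		.spec = &psc_spec, .mask = &psc_mask,
	};
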
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] samples
- * Sample IDs to be used in the matching.
+ * @param[in] last_item
+ * Last item flags.
*/
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
- void *key, const struct rte_flow_item *item)
+ void *key, const struct rte_flow_item *item,
+ uint64_t last_item)
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_item_ecpri *ecpri_m = item->mask;
void *dw_m;
void *dw_v;
+ /*
+ * In case of eCPRI over Ethernet, if EtherType is not specified,
+ * match on eCPRI EtherType implicitly.
+ */
+ if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
+ void *hdrs_m, *hdrs_v, *l2m, *l2v;
+
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
+ l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
+ if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
+ *(uint16_t *)l2m = UINT16_MAX;
+ *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
+ }
+ }
if (!ecpri_v)
return;
if (!ecpri_m)
*/
if (!ecpri_m->hdr.common.u32)
return;
- samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+ samples = priv->sh->ecpri_parser.ids;
/* Need to take the whole DW as the mask to fill the entry. */
dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
prog_sample_field_value_0);
reg_value, reg_mask);
}
+static void
+flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
+ const struct rte_flow_item *item,
+ struct mlx5_flow *dev_flow, bool is_inner)
+{
+ const struct rte_flow_item_flex *spec =
+ (const struct rte_flow_item_flex *)item->spec;
+ int index = mlx5_flex_acquire_index(dev, spec->handle, false);
+
+ MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+ if (index < 0)
+ return;
+ if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
+ /* Don't count both inner and outer flex items in one rule. */
+ if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
+ MLX5_ASSERT(false);
+ dev_flow->handle->flex_item |= (uint8_t)RTE_BIT32(index);
+ }
+ mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
+}
+
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
#define HEADER_IS_ZERO(match_criteria, headers) \
* Check flow matching criteria first, subtract misc5/4 length if flow
* doesn't own misc5/4 parameters. In some old rdma-core releases,
* misc5/4 are not supported, and matcher creation failure is expected
- * w/o subtration. If misc5 is provided, misc4 must be counted in since
+ * w/o subtraction. If misc5 is provided, misc4 must be counted in since
* misc5 is right after misc4.
*/
if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
tbl_data->tunnel->tunnel_id : 0,
tbl_data->group_id);
}
- mlx5_list_destroy(tbl_data->matchers);
+ if (tbl_data->matchers)
+ mlx5_list_destroy(tbl_data->matchers);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
dv_attr.priority = ref->priority;
if (tbl->is_egress)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
- ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
+ ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+ tbl->tbl.obj,
&resource->matcher_object);
if (ret) {
mlx5_free(resource);
flow_dv_tag_match_cb,
flow_dv_tag_remove_cb,
flow_dv_tag_clone_cb,
- flow_dv_tag_clone_free_cb);
+ flow_dv_tag_clone_free_cb,
+ error);
if (unlikely(!tag_table))
return -rte_errno;
entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
}
/**
- * Translate port ID action to vport.
+ * Translate action PORT_ID / REPRESENTED_PORT to vport.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in] action
- * Pointer to the port ID action.
+ * Pointer to action PORT_ID / REPRESENTED_PORT.
* @param[out] dst_port_id
* The target port ID.
* @param[out] error
{
uint32_t port;
struct mlx5_priv *priv;
- const struct rte_flow_action_port_id *conf =
- (const struct rte_flow_action_port_id *)action->conf;
- port = conf->original ? dev->data->port_id : conf->id;
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_PORT_ID: {
+ const struct rte_flow_action_port_id *conf;
+
+ conf = (const struct rte_flow_action_port_id *)action->conf;
+ port = conf->original ? dev->data->port_id : conf->id;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
+ const struct rte_flow_action_ethdev *ethdev;
+
+ ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+ port = ethdev->port_id;
+ break;
+ }
+ default:
+ MLX5_ASSERT(false);
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "unknown E-Switch action");
+ }
+
priv = mlx5_port_to_eswitch_info(port, false);
if (!priv)
return rte_flow_error_set(error, -rte_errno,
static uint32_t
flow_dv_translate_create_counter(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
- const struct rte_flow_action_count *count,
+ const struct rte_flow_action_count *count
+ __rte_unused,
const struct rte_flow_action_age *age)
{
uint32_t counter;
struct mlx5_age_param *age_param;
- if (count && count->shared)
- counter = flow_dv_counter_get_shared(dev, count->id);
- else
- counter = flow_dv_counter_alloc(dev, !!age);
+ counter = flow_dv_counter_alloc(dev, !!age);
if (!counter || age == NULL)
return counter;
age_param = flow_dv_counter_idx_get_age(dev, counter);
{
const struct mlx5_rte_flow_item_tx_queue *queue_m;
const struct mlx5_rte_flow_item_tx_queue *queue_v;
- void *misc_m =
- MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
- void *misc_v =
- MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
struct mlx5_txq_ctrl *txq;
- uint32_t queue;
-
+ uint32_t queue, mask;
queue_m = (const void *)item->mask;
- if (!queue_m)
- return;
queue_v = (const void *)item->spec;
if (!queue_v)
return;
txq = mlx5_txq_get(dev, queue_v->queue);
if (!txq)
return;
- queue = txq->obj->sq->id;
- MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
- MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
- queue & queue_m->queue);
+ if (txq->is_hairpin)
+ queue = txq->obj->sq->id;
+ else
+ queue = txq->obj->sq_obj.sq->id;
+ mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
+ MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
+ MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
mlx5_txq_release(dev, queue_v->queue);
}
/**
* Set the hash fields according to the @p flow information.
*
- * @param[in] dev_flow
- * Pointer to the mlx5_flow.
+ * @param[in] item_flags
+ * The match pattern item flags.
* @param[in] rss_desc
* Pointer to the mlx5_flow_rss_desc.
+ * @param[out] hash_fields
+ * Pointer to the RSS hash fields.
*/
-static void
-flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
- struct mlx5_flow_rss_desc *rss_desc)
+void
+flow_dv_hashfields_set(uint64_t item_flags,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint64_t *hash_fields)
{
- uint64_t items = dev_flow->handle->layers;
+ uint64_t items = item_flags;
+ uint64_t fields = 0;
int rss_inner = 0;
uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
- dev_flow->hash_fields = 0;
+ *hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
if (rss_desc->level >= 2)
rss_inner = 1;
#endif
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) ||
+ !items) {
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
- if (rss_types & ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
- else if (rss_types & ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
+ if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
+ fields |= IBV_RX_HASH_SRC_IPV4;
+ else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
+ fields |= IBV_RX_HASH_DST_IPV4;
else
- dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
+ fields |= MLX5_IPV4_IBV_RX_HASH;
}
} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) ||
+ !items) {
if (rss_types & MLX5_IPV6_LAYER_TYPES) {
- if (rss_types & ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
- else if (rss_types & ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
+ if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
+ fields |= IBV_RX_HASH_SRC_IPV6;
+ else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
+ fields |= IBV_RX_HASH_DST_IPV6;
else
- dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+ fields |= MLX5_IPV6_IBV_RX_HASH;
}
}
- if (dev_flow->hash_fields == 0)
+ if (items & MLX5_FLOW_ITEM_ESP) {
+ if (rss_types & RTE_ETH_RSS_ESP)
+ fields |= IBV_RX_HASH_IPSEC_SPI;
+ }
+ if ((fields & ~IBV_RX_HASH_IPSEC_SPI) == 0) {
+ *hash_fields = fields;
/*
* There is no match between the RSS types and the
* L3 protocol (IPv4/IPv6) defined in the flow rule.
*/
return;
+ }
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
- if (rss_types & ETH_RSS_UDP) {
- if (rss_types & ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_UDP;
- else if (rss_types & ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_UDP;
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||
+ !items) {
+ if (rss_types & RTE_ETH_RSS_UDP) {
+ if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
+ fields |= IBV_RX_HASH_SRC_PORT_UDP;
+ else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
+ fields |= IBV_RX_HASH_DST_PORT_UDP;
else
- dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+ fields |= MLX5_UDP_IBV_RX_HASH;
}
} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
- if (rss_types & ETH_RSS_TCP) {
- if (rss_types & ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_TCP;
- else if (rss_types & ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_TCP;
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP)) ||
+ !items) {
+ if (rss_types & RTE_ETH_RSS_TCP) {
+ if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
+ fields |= IBV_RX_HASH_SRC_PORT_TCP;
+ else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
+ fields |= IBV_RX_HASH_DST_PORT_TCP;
else
- dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+ fields |= MLX5_TCP_IBV_RX_HASH;
}
}
if (rss_inner)
- dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+ fields |= IBV_RX_HASH_INNER;
+ *hash_fields = fields;
}
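
A worked call of the reworked helper, assuming an outer IPv4/UDP pattern and UDP RSS with the source-port-only refinement; the result follows from the branches above:

	struct mlx5_flow_rss_desc rss_desc = {
		.types = RTE_ETH_RSS_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
		.level = 1,
	};
	uint64_t hash_fields;

	flow_dv_hashfields_set(MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
			       MLX5_FLOW_LAYER_OUTER_L4_UDP,
			       &rss_desc, &hash_fields);
	/* hash_fields == MLX5_IPV4_IBV_RX_HASH | IBV_RX_HASH_SRC_PORT_UDP */
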
/**
struct mlx5_flow_rss_desc *rss_desc,
uint32_t *hrxq_idx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *dh = dev_flow->handle;
+ uint32_t shared_rss = rss_desc->shared_rss;
struct mlx5_hrxq *hrxq;
MLX5_ASSERT(rss_desc->queue_num);
rss_desc->shared_rss = 0;
if (rss_desc->hash_fields == 0)
rss_desc->queue_num = 1;
- *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
- if (!*hrxq_idx)
- return NULL;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
- *hrxq_idx);
+ hrxq = mlx5_hrxq_get(dev, rss_desc);
+ *hrxq_idx = hrxq ? hrxq->idx : 0;
+ rss_desc->shared_rss = shared_rss;
return hrxq;
}
goto error;
}
}
- /* create a dest array actioin */
+ /* create a dest array action */
ret = mlx5_os_flow_dr_create_flow_action_dest_array
(domain,
resource->num_of_dest,
* rss->level and rss.types should be set in advance
* when expanding items for RSS.
*/
- flow_dv_hashfields_set(dev_flow, rss_desc);
+ flow_dv_hashfields_set(dev_flow->handle->layers,
+ rss_desc,
+ &dev_flow->hash_fields);
hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
rss_desc, &hrxq_idx);
if (!hrxq)
(((const struct rte_flow_action_mark *)
(sub_actions->conf))->id);
- dev_flow->handle->mark = 1;
+ wks->mark = 1;
pre_rix = dev_flow->handle->dvh.rix_tag;
/* Save the mark resource before sample */
pre_r = dev_flow->dv.tag_resource;
break;
}
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
{
struct mlx5_flow_dv_port_id_action_resource
port_id_resource;
struct mlx5_devx_obj *obj = NULL;
uint32_t i;
- obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
- priv->sh->pdn);
+ obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
+ priv->sh->cdev->pdn);
if (!obj) {
rte_errno = ENODATA;
DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
}
pool->flow_hit_aso_obj = obj;
pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
- rte_spinlock_lock(&mng->resize_sl);
+ rte_rwlock_write_lock(&mng->resize_rwl);
pool->index = mng->next;
/* Resize pools array if there is no room for the new pool in it. */
if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
claim_zero(mlx5_devx_cmd_destroy(obj));
mlx5_free(pool);
- rte_spinlock_unlock(&mng->resize_sl);
+ rte_rwlock_write_unlock(&mng->resize_rwl);
return NULL;
}
mng->pools[pool->index] = pool;
mng->next++;
- rte_spinlock_unlock(&mng->resize_sl);
+ rte_rwlock_write_unlock(&mng->resize_rwl);
/* Assign the first action in the new pool, the rest go to free list. */
*age_free = &pool->actions[0];
for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
void *headers_m, void *headers_v)
{
if (mask->l4_ok) {
- /* application l4_ok filter aggregates all hardware l4 filters
- * therefore hw l4_checksum_ok must be implicitly added here.
+ /* RTE l4_ok filter aggregates hardware l4_ok and
+ * l4_checksum_ok filters.
+ * Positive RTE l4_ok match requires hardware match on both L4
+ * hardware integrity bits.
+ * For negative match, check hardware l4_checksum_ok bit only,
+ * because hardware sets that bit to 0 for all packets
+ * with bad L4.
*/
- struct rte_flow_item_integrity local_item;
-
- local_item.l4_csum_ok = 1;
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
- local_item.l4_csum_ok);
if (value->l4_ok) {
- /* application l4_ok = 1 matches sets both hw flags
- * l4_ok and l4_checksum_ok flags to 1.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- l4_checksum_ok, local_item.l4_csum_ok);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
- mask->l4_ok);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
- value->l4_ok);
- } else {
- /* application l4_ok = 0 matches on hw flag
- * l4_checksum_ok = 0 only.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- l4_checksum_ok, 0);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
}
- } else if (mask->l4_csum_ok) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
- mask->l4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+ !!value->l4_ok);
+ }
+ if (mask->l4_csum_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
value->l4_csum_ok);
}
static void
flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
const struct rte_flow_item_integrity *value,
- void *headers_m, void *headers_v,
- bool is_ipv4)
+ void *headers_m, void *headers_v, bool is_ipv4)
{
if (mask->l3_ok) {
- /* application l3_ok filter aggregates all hardware l3 filters
- * therefore hw ipv4_checksum_ok must be implicitly added here.
+ /* For IPv4, the RTE l3_ok filter aggregates the hardware l3_ok
+ * and ipv4_csum_ok filters.
+ * Positive RTE l3_ok match requires hardware match on both L3
+ * hardware integrity bits.
+ * For negative match, check hardware l3_csum_ok bit only,
+ * because hardware sets that bit to 0 for all packets
+ * with bad L3.
*/
- struct rte_flow_item_integrity local_item;
-
- local_item.ipv4_csum_ok = !!is_ipv4;
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
- local_item.ipv4_csum_ok);
- if (value->l3_ok) {
+ if (is_ipv4) {
+ if (value->l3_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+ l3_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ l3_ok, 1);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+ ipv4_checksum_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- ipv4_checksum_ok, local_item.ipv4_csum_ok);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
- mask->l3_ok);
+ ipv4_checksum_ok, !!value->l3_ok);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
value->l3_ok);
- } else {
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- ipv4_checksum_ok, 0);
}
- } else if (mask->ipv4_csum_ok) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
- mask->ipv4_csum_ok);
+ }
+ if (mask->ipv4_csum_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
value->ipv4_csum_ok);
}
}
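
A usage sketch of the integrity matching these two helpers implement: level 0 selects the outer headers, and setting both bits in spec and mask requires a valid L3 (including the IPv4 checksum) and a valid L4.

	struct rte_flow_item_integrity integ_spec = {
		.level = 0,	/* outer header integrity */
		.l3_ok = 1,
		.l4_ok = 1,
	};
	struct rte_flow_item_integrity integ_mask = {
		.l3_ok = 1,
		.l4_ok = 1,
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
		.spec = &integ_spec, .mask = &integ_mask,
	};
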
static void
-flow_dv_translate_item_integrity(void *matcher, void *key,
- const struct rte_flow_item *head_item,
- const struct rte_flow_item *integrity_item)
+set_integrity_bits(void *headers_m, void *headers_v,
+ const struct rte_flow_item *integrity_item, bool is_l3_ip4)
{
+ const struct rte_flow_item_integrity *spec = integrity_item->spec;
const struct rte_flow_item_integrity *mask = integrity_item->mask;
- const struct rte_flow_item_integrity *value = integrity_item->spec;
- const struct rte_flow_item *tunnel_item, *end_item, *item;
- void *headers_m;
- void *headers_v;
- uint32_t l3_protocol;
- if (!value)
- return;
+ /* Integrity bits validation rejected a cleared spec pointer. */
+ MLX5_ASSERT(spec != NULL);
if (!mask)
mask = &rte_flow_item_integrity_mask;
- if (value->level > 1) {
+ flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
+ is_l3_ip4);
+ flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
+}
+
+static void
+flow_dv_translate_item_integrity_post(void *matcher, void *key,
+ const
+ struct rte_flow_item *integrity_items[2],
+ uint64_t pattern_flags)
+{
+ void *headers_m, *headers_v;
+ bool is_l3_ip4;
+
+ if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
+ is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
+ 0;
+ set_integrity_bits(headers_m, headers_v,
+ integrity_items[1], is_l3_ip4);
+ }
+ if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
+ 0;
+ set_integrity_bits(headers_m, headers_v,
+ integrity_items[0], is_l3_ip4);
}
- tunnel_item = mlx5_flow_find_tunnel_item(head_item);
- if (value->level > 1) {
- /* tunnel item was verified during the item validation */
- item = tunnel_item;
- end_item = mlx5_find_end_item(tunnel_item);
+}
+
+static void
+flow_dv_translate_item_integrity(const struct rte_flow_item *item,
+ const struct rte_flow_item *integrity_items[2],
+ uint64_t *last_item)
+{
+ const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
+
+ /* Integrity bits validation rejected a cleared spec pointer. */
+ MLX5_ASSERT(spec != NULL);
+ if (spec->level > 1) {
+ integrity_items[1] = item;
+ *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
} else {
- item = head_item;
- end_item = tunnel_item ? tunnel_item :
- mlx5_find_end_item(integrity_item);
+ integrity_items[0] = item;
+ *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
}
- l3_protocol = mask->l3_ok ?
- mlx5_flow_locate_proto_l3(&item, end_item) : 0;
- flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
- l3_protocol == RTE_ETHER_TYPE_IPV4);
- flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
}
/**
}
static inline int
-flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
+flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
+ struct rte_flow_error *error)
{
uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
struct rte_eth_dev *owndev = &rte_eth_devices[owner];
- RTE_SET_USED(dev);
+ int ret;
MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
if (dev->data->dev_started != 1)
- return -1;
- return flow_dv_aso_ct_dev_release(owndev, idx);
+ return rte_flow_error_set(error, EAGAIN,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Indirect CT action cannot be destroyed when the port is stopped");
+ ret = flow_dv_aso_ct_dev_release(owndev, idx);
+ if (ret < 0)
+ return rte_flow_error_set(error, EAGAIN,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Current state prevents indirect CT action from being destroyed");
+ return ret;
}
/*
uint32_t i;
uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
- obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
- priv->sh->pdn, log_obj_size);
+ obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
+ priv->sh->cdev->pdn,
+ log_obj_size);
if (!obj) {
rte_errno = ENODATA;
DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
uint32_t ct_idx;
MLX5_ASSERT(mng);
- if (!priv->config.devx) {
+ if (!priv->sh->cdev->config.devx) {
rte_errno = ENOTSUP;
return 0;
}
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *dev_conf = &priv->config;
+ struct mlx5_sh_config *dev_conf = &priv->sh->config;
struct rte_flow *flow = dev_flow->flow;
struct mlx5_flow_handle *handle = dev_flow->handle;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
(1 << MLX5_SCALE_FLOW_GROUP_BIT),
.std_tbl_fix = true,
};
- const struct rte_flow_item *head_item = items;
+ const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
+ const struct rte_flow_item *tunnel_item = NULL;
+ const struct rte_flow_item *gre_item = NULL;
if (!wks)
return rte_flow_error_set(error, ENOMEM,
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
if (flow_dv_translate_action_port_id(dev, action,
&port_id, error))
return -rte_errno;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
action_flags |= MLX5_FLOW_ACTION_FLAG;
- dev_flow->handle->mark = 1;
+ wks->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
struct rte_flow_action_mark mark = {
.id = MLX5_FLOW_MARK_DEFAULT,
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
- dev_flow->handle->mark = 1;
+ wks->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
MLX5_FLOW_FATE_QUEUE;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
- flow->age = (uint32_t)(uintptr_t)(action->conf);
- age_act = flow_aso_age_get_by_idx(dev, flow->age);
- __atomic_fetch_add(&age_act->refcnt, 1,
- __ATOMIC_RELAXED);
+ owner_idx = (uint32_t)(uintptr_t)action->conf;
+ age_act = flow_aso_age_get_by_idx(dev, owner_idx);
+ if (flow->age == 0) {
+ flow->age = owner_idx;
+ __atomic_fetch_add(&age_act->refcnt, 1,
+ __ATOMIC_RELAXED);
+ }
age_act_pos = actions_n++;
action_flags |= MLX5_FLOW_ACTION_AGE;
break;
action_flags |= MLX5_FLOW_ACTION_AGE;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
- flow->counter = (uint32_t)(uintptr_t)(action->conf);
- cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
+ owner_idx = (uint32_t)(uintptr_t)action->conf;
+ cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
NULL);
- __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
- __ATOMIC_RELAXED);
- /* Save information first, will apply later. */
- action_flags |= MLX5_FLOW_ACTION_COUNT;
+ MLX5_ASSERT(cnt_act != NULL);
+ /**
+ * When creating meter drop flow in drop table, the
+ * counter should not overwrite the rte flow counter.
+ */
+ if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
+ dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
+ dev_flow->dv.actions[actions_n++] =
+ cnt_act->action;
+ } else {
+ if (flow->counter == 0) {
+ flow->counter = owner_idx;
+ __atomic_fetch_add
+ (&cnt_act->shared_info.refcnt,
+ 1, __ATOMIC_RELAXED);
+ }
+ /* Save information first, will apply later. */
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ }
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- if (!dev_conf->devx) {
+ if (!priv->sh->cdev->config.devx) {
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "Failed to get meter in flow.");
/* Set the meter action. */
dev_flow->dv.actions[actions_n++] =
- wks->fm->meter_action;
+ wks->fm->meter_action_g;
action_flags |= MLX5_FLOW_ACTION_METER;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
else
dev_flow->dv.actions[actions_n] =
ct->dr_action_rply;
- flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
- flow->ct = owner_idx;
- __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ if (flow->ct == 0) {
+ flow->indirect_type =
+ MLX5_INDIRECT_ACTION_TYPE_CT;
+ flow->ct = owner_idx;
+ __atomic_fetch_add(&ct->refcnt, 1,
+ __ATOMIC_RELAXED);
+ }
actions_n++;
action_flags |= MLX5_FLOW_ACTION_CT;
break;
* when they are not shared.
*/
if (action_flags & MLX5_FLOW_ACTION_AGE) {
- if ((non_shared_age &&
- count && !count->shared) ||
- !(priv->sh->flow_hit_aso_en &&
- (attr->group || attr->transfer))) {
+ if ((non_shared_age && count) ||
+ !flow_hit_aso_supported(priv->sh, attr)) {
/* Creates age by counters. */
cnt_act = flow_dv_prepare_counter
(dev, dev_flow,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ flow_dv_translate_item_esp(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_ITEM_ESP;
+ break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
flow_dv_translate_item_port_id
(dev, match_mask, match_value, items, attr);
last_item = MLX5_FLOW_ITEM_PORT_ID;
break;
+ case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
+ flow_dv_translate_item_represented_port
+ (dev, match_mask, match_value, items, attr);
+ last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
+ break;
case RTE_FLOW_ITEM_TYPE_ETH:
flow_dv_translate_item_eth(match_mask, match_value,
items, tunnel,
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- flow_dv_translate_item_gre(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
+ gre_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
flow_dv_translate_item_gre_key(match_mask,
match_value, items);
last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
+ case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
+ break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- flow_dv_translate_item_nvgre(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(dev, attr,
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- flow_dv_translate_item_vxlan_gpe(match_mask,
- match_value, items,
- tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
- flow_dv_translate_item_geneve(match_mask, match_value,
- items, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GENEVE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
case RTE_FLOW_ITEM_TYPE_ICMP:
flow_dv_translate_item_icmp(match_mask, match_value,
items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_ICMP;
break;
case RTE_FLOW_ITEM_TYPE_ICMP6:
flow_dv_translate_item_icmp6(match_mask, match_value,
items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_ICMP6;
break;
case RTE_FLOW_ITEM_TYPE_TAG:
"cannot create eCPRI parser");
}
flow_dv_translate_item_ecpri(dev, match_mask,
- match_value, items);
+ match_value, items,
+ last_item);
/* No other protocol should follow eCPRI layer. */
last_item = MLX5_FLOW_LAYER_ECPRI;
break;
case RTE_FLOW_ITEM_TYPE_INTEGRITY:
- flow_dv_translate_item_integrity(match_mask,
- match_value,
- head_item, items);
+ flow_dv_translate_item_integrity(items, integrity_items,
+ &last_item);
break;
case RTE_FLOW_ITEM_TYPE_CONNTRACK:
flow_dv_translate_item_aso_ct(dev, match_mask,
match_value, items);
break;
+ case RTE_FLOW_ITEM_TYPE_FLEX:
+ flow_dv_translate_item_flex(dev, match_mask,
+ match_value, items,
+ dev_flow, tunnel != 0);
+ last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+ MLX5_FLOW_ITEM_OUTER_FLEX;
+ break;
default:
break;
}
/*
* When E-Switch mode is enabled, we have two cases where we need to
* set the source port manually.
- * The first one, is in case of Nic steering rule, and the second is
- * E-Switch rule where no port_id item was found. In both cases
- * the source port is set according the current port in use.
+ * The first one is the case of a NIC ingress steering rule, and
+ * the second is an E-Switch rule where no port_id item was found.
+ * In both cases the source port is set according to the current
+ * port in use.
*/
if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
- (priv->representor || priv->master)) {
+ !(item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&
+ !(attr->egress && !attr->transfer)) {
if (flow_dv_translate_item_port_id(dev, match_mask,
match_value, NULL, attr))
return -rte_errno;
}
+ if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
+ flow_dv_translate_item_integrity_post(match_mask, match_value,
+ integrity_items,
+ item_flags);
+ }
+ if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
+ flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
+ flow_dv_translate_item_geneve(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GRE) {
+ if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
+ flow_dv_translate_item_gre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+ flow_dv_translate_item_nvgre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)
+ flow_dv_translate_item_gre_option(match_mask, match_value,
+ tunnel_item, gre_item, item_flags);
+ else
+ MLX5_ASSERT(false);
+ }
#ifdef RTE_LIBRTE_MLX5_DEBUG
MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
*/
handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
- flow_dv_hashfields_set(dev_flow, rss_desc);
+ flow_dv_hashfields_set(dev_flow->handle->layers,
+ rss_desc,
+ &dev_flow->hash_fields);
/* If the sample action contains an RSS action, the Sample/Mirror
* resource should be registered after the hash field is updated.
*/
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);
matcher.priority = mlx5_get_matcher_priority(dev, attr,
- matcher.priority);
+ matcher.priority,
+ dev_flow->external);
/**
* When creating meter drop flow in drop table, using original
* 5-tuple match, the matcher priority should be lower than
case MLX5_RSS_HASH_NONE:
hrxqs[6] = hrxq_idx;
return 0;
+ case MLX5_RSS_HASH_IPV4_ESP:
+ hrxqs[7] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_ESP:
+ hrxqs[8] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_ESP_SPI:
+ hrxqs[9] = hrxq_idx;
+ return 0;
default:
return -1;
}
* @return
* Valid hash RX queue index, otherwise 0.
*/
-static uint32_t
-__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
- const uint64_t hash_fields)
+uint32_t
+flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
+ const uint64_t hash_fields)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss =
return hrxqs[5];
case MLX5_RSS_HASH_NONE:
return hrxqs[6];
+ case MLX5_RSS_HASH_IPV4_ESP:
+ return hrxqs[7];
+ case MLX5_RSS_HASH_IPV6_ESP:
+ return hrxqs[8];
+ case MLX5_RSS_HASH_ESP_SPI:
+ return hrxqs[9];
default:
return 0;
}
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx;
- hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
+ hrxq_idx = flow_dv_action_rss_hrxq_lookup(dev,
rss_desc->shared_rss,
dev_flow->hash_fields);
if (hrxq_idx)
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
- (!priv->config.allow_duplicate_pattern &&
+ (!priv->sh->config.allow_duplicate_pattern &&
errno == EEXIST) ?
"duplicating pattern is not allowed" :
"hardware refuses to create flow");
}
/* Keep the current age handling by default. */
if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
- flow_dv_aso_ct_release(dev, flow->ct);
+ flow_dv_aso_ct_release(dev, flow->ct, NULL);
else if (flow->age)
flow_dv_aso_age_release(dev, flow->age);
if (flow->geneve_tlv_option) {
if (!dev_handle)
return;
flow->dev_handles = dev_handle->next.next;
+ while (dev_handle->flex_item) {
+ int index = rte_bsf32(dev_handle->flex_item);
+
+ mlx5_flex_release_index(dev, index);
+ dev_handle->flex_item &= ~(uint8_t)RTE_BIT32(index);
+ }
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
if (dev_handle->dvh.rix_sample)
* MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
* same slot in mlx5_rss_hash_fields.
*
- * @param[in] rss
- * Pointer to the shared action RSS conf.
+ * @param[in] orig_rss_types
+ * RSS type as provided in shared RSS action.
* @param[in, out] hash_field
* hash_field variable needed to be adjusted.
*
* @return
* void
*/
-static void
-__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
- uint64_t *hash_field)
+void
+flow_dv_action_rss_l34_hash_adjust(uint64_t orig_rss_types,
+ uint64_t *hash_field)
{
- uint64_t rss_types = rss->origin.types;
+ uint64_t rss_types = rte_eth_rss_hf_refine(orig_rss_types);
switch (*hash_field & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
*hash_field &= ~MLX5_RSS_HASH_IPV4;
- if (rss_types & ETH_RSS_L3_DST_ONLY)
+ if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
*hash_field |= IBV_RX_HASH_DST_IPV4;
- else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
*hash_field |= IBV_RX_HASH_SRC_IPV4;
else
*hash_field |= MLX5_RSS_HASH_IPV4;
case MLX5_RSS_HASH_IPV6:
if (rss_types & MLX5_IPV6_LAYER_TYPES) {
*hash_field &= ~MLX5_RSS_HASH_IPV6;
- if (rss_types & ETH_RSS_L3_DST_ONLY)
+ if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
*hash_field |= IBV_RX_HASH_DST_IPV6;
- else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
*hash_field |= IBV_RX_HASH_SRC_IPV6;
else
*hash_field |= MLX5_RSS_HASH_IPV6;
case MLX5_RSS_HASH_IPV4_UDP:
/* fall-through. */
case MLX5_RSS_HASH_IPV6_UDP:
- if (rss_types & ETH_RSS_UDP) {
+ if (rss_types & RTE_ETH_RSS_UDP) {
*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
- if (rss_types & ETH_RSS_L4_DST_ONLY)
+ if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
- else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
else
*hash_field |= MLX5_UDP_IBV_RX_HASH;
case MLX5_RSS_HASH_IPV4_TCP:
/* fall-through. */
case MLX5_RSS_HASH_IPV6_TCP:
- if (rss_types & ETH_RSS_TCP) {
+ if (rss_types & RTE_ETH_RSS_TCP) {
*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
- if (rss_types & ETH_RSS_L4_DST_ONLY)
+ if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
- else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
else
*hash_field |= MLX5_TCP_IBV_RX_HASH;
struct mlx5_shared_action_rss *shared_rss,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_rss_desc rss_desc = { 0 };
size_t i;
int err;
- if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
+ shared_rss->ind_tbl = mlx5_ind_table_obj_new
+ (dev, shared_rss->origin.queue,
+ shared_rss->origin.queue_num,
+ true,
+ !!dev->data->dev_started);
+ if (!shared_rss->ind_tbl)
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot setup indirection table");
- }
memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc.const_q = shared_rss->origin.queue;
/* Set non-zero value to indicate a shared RSS. */
rss_desc.shared_rss = action_idx;
rss_desc.ind_tbl = shared_rss->ind_tbl;
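+ /* In HW steering mode (dv_flow_en == 2), create the hrxqs with the HWS Rx action flag. */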
+ if (priv->sh->config.dv_flow_en == 2)
+ rss_desc.hws_flags = MLX5DR_ACTION_FLAG_HWS_RX;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
- uint32_t hrxq_idx;
+ struct mlx5_hrxq *hrxq;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
int tunnel = 0;
- __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
+ flow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types,
+ &hash_fields);
if (shared_rss->origin.level > 1) {
hash_fields |= IBV_RX_HASH_INNER;
tunnel = 1;
}
rss_desc.tunnel = tunnel;
rss_desc.hash_fields = hash_fields;
- hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
- if (!hrxq_idx) {
+ hrxq = mlx5_hrxq_get(dev, &rss_desc);
+ if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
goto error_hrxq_new;
}
err = __flow_dv_action_rss_hrxq_set
- (shared_rss, hash_fields, hrxq_idx);
+ (shared_rss, hash_fields, hrxq->idx);
MLX5_ASSERT(!err);
}
return 0;
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss = NULL;
- void *queue = NULL;
struct rte_flow_action_rss *origin;
const uint8_t *rss_key;
- uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
uint32_t idx;
RTE_SET_USED(conf);
- queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
- 0, SOCKET_ID_ANY);
shared_rss = mlx5_ipool_zmalloc
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
- if (!shared_rss || !queue) {
+ if (!shared_rss) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
"rss action number out of range");
goto error_rss_init;
}
- shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(*shared_rss->ind_tbl),
- 0, SOCKET_ID_ANY);
- if (!shared_rss->ind_tbl) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- goto error_rss_init;
- }
- memcpy(queue, rss->queue, queue_size);
- shared_rss->ind_tbl->queues = queue;
- shared_rss->ind_tbl->queues_n = rss->queue_num;
origin = &shared_rss->origin;
origin->func = rss->func;
origin->level = rss->level;
- /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+ /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+ origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
origin->key = &shared_rss->key[0];
origin->key_len = MLX5_RSS_HASH_KEY_LEN;
- origin->queue = queue;
+ origin->queue = rss->queue;
origin->queue_num = rss->queue_num;
if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
goto error_rss_init;
+ /* Point origin->queue at the indirection table's own queue memory. */
+ origin->queue = shared_rss->ind_tbl->queues;
rte_spinlock_init(&shared_rss->action_rss_sl);
__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
rte_spinlock_lock(&priv->shared_act_sl);
error_rss_init:
if (shared_rss) {
if (shared_rss->ind_tbl)
- mlx5_free(shared_rss->ind_tbl);
+ mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
+ !!dev->data->dev_started);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
}
- if (queue)
- mlx5_free(queue);
return 0;
}
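+
+/*
+ * Application-side sketch (generic rte_flow API, not driver code): the
+ * shared RSS action created above is reached through indirect action
+ * handles, e.g.:
+ *
+ *	struct rte_flow_action_handle *h =
+ *		rte_flow_action_handle_create(port_id, &conf, &action, &err);
+ *	rte_flow_action_handle_update(port_id, h, &update, &err);
+ */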
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
uint32_t old_refcnt = 1;
int remaining;
- uint16_t *queue = NULL;
- if (!shared_rss)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "invalid shared action");
- remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
- if (remaining)
- return rte_flow_error_set(error, EBUSY,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "shared rss hrxq has references");
+ if (!shared_rss)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "invalid shared action");
if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
0, 0, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED))
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss has references");
- queue = shared_rss->ind_tbl->queues;
- remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
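+ /* With the last reference gone, release the hrxqs first, then the indirection table. */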
+ remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+ if (remaining)
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss hrxq has references");
+ remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
+ !!dev->data->dev_started);
if (remaining)
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss indirection table has"
" references");
- mlx5_free(queue);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
* A valid shared action handle in case of success, NULL otherwise and
* rte_errno is set.
*/
-static struct rte_flow_action_handle *
+struct rte_flow_action_handle *
flow_dv_action_create(struct rte_eth_dev *dev,
const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
* @return
* 0 on success, otherwise negative errno value.
*/
-static int
+int
flow_dv_action_destroy(struct rte_eth_dev *dev,
struct rte_flow_action_handle *handle,
struct rte_flow_error *error)
" released with references %d.", idx, ret);
return 0;
case MLX5_INDIRECT_ACTION_TYPE_CT:
- ret = flow_dv_aso_ct_release(dev, idx);
+ ret = flow_dv_aso_ct_release(dev, idx, error);
if (ret < 0)
return ret;
if (ret > 0)
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
int ret = 0;
void *queue = NULL;
- uint16_t *queue_old = NULL;
+ void *queue_i = NULL;
uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
+ bool dev_started = !!dev->data->dev_started;
if (!shared_rss)
return rte_flow_error_set(error, EINVAL,
memcpy(queue, action_conf->queue, queue_size);
MLX5_ASSERT(shared_rss->ind_tbl);
rte_spinlock_lock(&shared_rss->action_rss_sl);
- queue_old = shared_rss->ind_tbl->queues;
+ queue_i = shared_rss->ind_tbl->queues;
ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
- queue, action_conf->queue_num, true);
+ queue, action_conf->queue_num,
+ true /* standalone */,
+ dev_started /* ref_new_qs */,
+ dev_started /* deref_old_qs */);
if (ret) {
- mlx5_free(queue);
ret = rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"cannot update indirection table");
} else {
- mlx5_free(queue_old);
- shared_rss->origin.queue = queue;
+ /* Copy the new queues into the table's own memory and restore the pointer. */
+ memcpy(queue_i, queue, queue_size);
+ shared_rss->ind_tbl->queues = queue_i;
shared_rss->origin.queue_num = action_conf->queue_num;
}
+ mlx5_free(queue);
rte_spinlock_unlock(&shared_rss->action_rss_sl);
return ret;
}
* @return
* 0 on success, otherwise negative errno value.
*/
-static int
+int
flow_dv_action_update(struct rte_eth_dev *dev,
struct rte_flow_action_handle *handle,
const void *update,
for (i = 0; i < RTE_COLORS; i++) {
next_fm = NULL;
- if (i == RTE_COLOR_GREEN && policy &&
+ if (i <= RTE_COLOR_YELLOW && policy &&
policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
next_fm = mlx5_flow_meter_find(priv,
policy->act_cnt[i].next_mtr_id, NULL);
- TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
+ RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
next_port, tmp) {
claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
tbl = container_of(color_rule->matcher->tbl,
mtr_policy->dr_drop_action[j] = NULL;
}
+/**
+ * Create the yellow action for a color-aware meter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] fm
+ * Meter information table.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+__flow_dv_create_mtr_yellow_action(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_info *fm,
+ struct rte_mtr_error *error)
+{
+#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_error flow_err;
+ struct mlx5_aso_mtr *aso_mtr;
+ struct mlx5_aso_mtr_pool *pool;
+ uint8_t reg_id;
+
+ aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
+ pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool, mtrs[aso_mtr->offset]);
+ reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
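+ /*
+  * Create an ASO action matching only the YELLOW color bit;
+  * reg_id - REG_C_0 is the offset of the color-carrying register.
+  */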
+ fm->meter_action_y =
+ mlx5_glue->dv_create_flow_action_aso(priv->sh->rx_domain,
+ pool->devx_obj->obj,
+ aso_mtr->offset,
+ (1 << MLX5_FLOW_COLOR_YELLOW),
+ reg_id - REG_C_0);
+#else
+ RTE_SET_USED(dev);
+#endif
+ if (!fm->meter_action_y) {
+ return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Fail to create yellow meter action.");
+ }
+ return 0;
+}
+
/**
* Create policy action per domain, lock free,
* (mutex should be acquired by caller).
* Meter policy struct.
* @param[in] action
* Action specification used to create meter actions.
+ * @param[in] attr
+ * Pointer to the flow attributes.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
__flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
struct mlx5_flow_meter_policy *mtr_policy,
const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_flow_attr *attr,
enum mlx5_meter_domain domain,
struct rte_mtr_error *error)
{
NULL,
"cannot create policy "
"mark action for this color");
- dev_flow.handle->mark = 1;
if (flow_dv_tag_resource_register(dev, tag_be,
&dev_flow, &flow_err))
return -rte_mtr_error_set(error,
act_cnt->rix_mark =
dev_flow.handle->dvh.rix_tag;
action_flags |= MLX5_FLOW_ACTION_MARK;
+ mtr_policy->mark = 1;
break;
}
case RTE_FLOW_ACTION_TYPE_SET_TAG:
break;
}
case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
{
struct mlx5_flow_dv_port_id_action_resource
port_id_resource;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
}
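+ /* Modify-field policy actions are folded into a single modify header resource. */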
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ {
+ if (i >= MLX5_MTR_RTE_COLORS)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "cannot create policy modify field for this color");
+ if (flow_dv_convert_action_modify_field
+ (dev, mhdr_res, act, attr, &flow_err))
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot setup policy modify field action");
+ if (!mhdr_res->actions_num)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot find policy modify field action");
+ action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+ break;
+ }
/*
- * No need to check meter hierarchy for Y or R colors
+ * No need to check meter hierarchy for the R color
* here since it is done in the validation stage.
*/
case RTE_FLOW_ACTION_TYPE_METER:
action_flags |=
MLX5_FLOW_ACTION_SET_TAG;
}
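+ /* Lazily create the yellow ASO meter action for a color-aware next meter. */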
+ if (i == RTE_COLOR_YELLOW && next_fm->color_aware &&
+ !next_fm->meter_action_y)
+ if (__flow_dv_create_mtr_yellow_action(dev, next_fm, error))
+ return -rte_errno;
act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
act_cnt->next_mtr_id = next_fm->meter_id;
act_cnt->next_sub_policy = NULL;
mtr_policy->is_hierarchy = 1;
mtr_policy->dev = next_policy->dev;
+ if (next_policy->mark)
+ mtr_policy->mark = 1;
action_flags |=
MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
break;
RTE_MTR_ERROR_TYPE_METER_POLICY,
NULL, "action type not supported");
}
- if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
+ if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) ||
+ (action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD)) {
/* create modify action if needed. */
dev_flow.dv.group = 1;
if (flow_dv_modify_hdr_resource_register
return -rte_mtr_error_set(error,
ENOTSUP,
RTE_MTR_ERROR_TYPE_METER_POLICY,
- NULL, "cannot register policy "
- "set tag action");
+ NULL, "cannot register policy set tag/modify field action");
act_cnt->modify_hdr =
dev_flow.handle->dvh.modify_hdr;
}
* Meter policy struct.
* @param[in] action
* Action specification used to create meter actions.
+ * @param[in] attr
+ * Pointer to the flow attributes.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
struct mlx5_flow_meter_policy *mtr_policy,
const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_flow_attr *attr,
struct rte_mtr_error *error)
{
int ret, i;
MLX5_MTR_SUB_POLICY_NUM_MASK;
if (sub_policy_num) {
ret = __flow_dv_create_domain_policy_acts(dev,
- mtr_policy, actions,
+ mtr_policy, actions, attr,
(enum mlx5_meter_domain)i, error);
/* Cleaning resource is done in the caller level. */
if (ret)
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_query_count *qc = data;
- if (!priv->config.devx)
+ if (!priv->sh->cdev->config.devx)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"counters are not available");
}
-static int
+int
flow_dv_action_query(struct rte_eth_dev *dev,
const struct rte_flow_action_handle *handle, void *data,
struct rte_flow_error *error)
struct mlx5_priv *priv = dev->data->dev_private;
int i;
- if (!fm || !priv->config.dv_flow_en)
+ if (!fm || !priv->sh->config.dv_flow_en)
return;
for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
if (fm->drop_rule[i]) {
struct mlx5_priv *priv = dev->data->dev_private;
uint8_t misc_mask;
- if (match_src_port && (priv->representor || priv->master)) {
+ if (match_src_port && priv->sh->esw_mode) {
if (flow_dv_translate_item_port_id(dev, matcher.buf,
value.buf, item, attr)) {
DRV_LOG(ERR, "Failed to create meter policy%d flow's"
struct mlx5_priv *priv = dev->data->dev_private;
const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
- if (match_src_port && (priv->representor || priv->master)) {
+ if (match_src_port && priv->sh->esw_mode) {
if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
value.buf, item, attr)) {
DRV_LOG(ERR, "Failed to register meter policy%d matcher"
struct mlx5_flow_dv_tag_resource *tag;
struct mlx5_flow_dv_port_id_action_resource *port_action;
struct mlx5_hrxq *hrxq;
- struct mlx5_flow_meter_info *next_fm = NULL;
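+ /* Both green and yellow may chain to a next meter, so track one per color. */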
+ struct mlx5_flow_meter_info *next_fm[RTE_COLORS] = {NULL};
struct mlx5_flow_meter_policy *next_policy;
struct mlx5_flow_meter_sub_policy *next_sub_policy;
struct mlx5_flow_tbl_data_entry *tbl_data;
acts[i].actions_n = 1;
continue;
}
- if (i == RTE_COLOR_GREEN &&
- mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
+ if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
struct rte_flow_attr attr = {
.transfer = transfer
};
- next_fm = mlx5_flow_meter_find(priv,
+ next_fm[i] = mlx5_flow_meter_find(priv,
mtr_policy->act_cnt[i].next_mtr_id,
NULL);
- if (!next_fm) {
+ if (!next_fm[i]) {
DRV_LOG(ERR,
"Failed to get next hierarchy meter.");
goto err_exit;
}
- if (mlx5_flow_meter_attach(priv, next_fm,
+ if (mlx5_flow_meter_attach(priv, next_fm[i],
&attr, &error)) {
DRV_LOG(ERR, "%s", error.message);
- next_fm = NULL;
+ next_fm[i] = NULL;
goto err_exit;
}
/* Meter action must be the first for TX. */
if (mtr_first) {
acts[i].dv_actions[acts[i].actions_n] =
- next_fm->meter_action;
+ (next_fm[i]->color_aware && i == RTE_COLOR_YELLOW) ?
+ next_fm[i]->meter_action_y :
+ next_fm[i]->meter_action_g;
acts[i].actions_n++;
}
}
acts[i].actions_n++;
break;
case MLX5_FLOW_FATE_MTR:
- if (!next_fm) {
+ if (!next_fm[i]) {
DRV_LOG(ERR,
"No next hierarchy meter.");
goto err_exit;
}
if (!mtr_first) {
acts[i].dv_actions[acts[i].actions_n] =
- next_fm->meter_action;
+ (next_fm[i]->color_aware && i == RTE_COLOR_YELLOW) ?
+ next_fm[i]->meter_action_y :
+ next_fm[i]->meter_action_g;
acts[i].actions_n++;
}
if (mtr_policy->act_cnt[i].next_sub_policy) {
} else {
next_policy =
mlx5_flow_meter_policy_find(dev,
- next_fm->policy_id, NULL);
+ next_fm[i]->policy_id, NULL);
MLX5_ASSERT(next_policy);
next_sub_policy =
next_policy->sub_policys[domain][0];
}
return 0;
err_exit:
- if (next_fm)
- mlx5_flow_meter_detach(priv, next_fm);
+ for (i = 0; i < RTE_COLORS; i++)
+ if (next_fm[i])
+ mlx5_flow_meter_detach(priv, next_fm[i]);
return -1;
}
/* Non-termination policy table. */
for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
- if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
+ if (!priv->sh->config.dv_esw_en &&
+ i == MLX5_MTR_DOMAIN_TRANSFER)
continue;
if (__flow_dv_create_domain_def_policy(dev, i)) {
DRV_LOG(ERR, "Failed to create default policy");
struct mlx5_meter_policy_action_container *act_cnt;
uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
uint16_t sub_policy_num;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
rte_spinlock_lock(&mtr_policy->sl);
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
if (!rss_desc[i])
continue;
- hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
- if (!hrxq_idx[i]) {
+ hrxq = mlx5_hrxq_get(dev, rss_desc[i]);
+ if (!hrxq) {
rte_spinlock_unlock(&mtr_policy->sl);
return NULL;
}
+ hrxq_idx[i] = hrxq->idx;
}
sub_policy_num = (mtr_policy->sub_policy_num >>
(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
}
}
/* Create sub policy. */
- if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
+ if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_GREEN] &&
+ !mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_YELLOW]) {
/* Reuse the first pre-allocated sub_policy. */
sub_policy = mtr_policy->sub_policys[domain][0];
sub_policy_idx = sub_policy->idx;
if (act_cnt->rix_mark || act_cnt->modify_hdr) {
memset(&dh, 0, sizeof(struct mlx5_flow_handle));
if (act_cnt->rix_mark)
- dh.mark = 1;
+ wks->mark = 1;
dh.fate_action = MLX5_FLOW_FATE_QUEUE;
dh.rix_hrxq = hrxq_idx[i];
flow_drv_rxq_flags_set(dev, &dh);
DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
return NULL;
}
- next_fm = mlx5_flow_meter_find(priv,
- mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
+ rte_spinlock_lock(&mtr_policy->sl);
+ next_fm = mlx5_flow_meter_hierarchy_next_meter(priv, mtr_policy, NULL);
+ rte_spinlock_unlock(&mtr_policy->sl);
if (!next_fm) {
DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
return NULL;
return NULL;
}
+/**
+ * Check whether the hierarchy tag rule needs to be created.
+ *
+ * @param[in] priv
+ * Pointer to mlx5_priv.
+ * @param[in] mtr_policy
+ * Pointer to current meter policy.
+ * @param[in] src_port
+ * The src port this extra rule should use.
+ * @param[out] next_fm
+ * Pointer to next meter in hierarchy.
+ * @param[out] skip
+ * Indicates whether to skip the tag rule creation.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_meter_hierarchy_skip_tag_rule(struct mlx5_priv *priv,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ int32_t src_port,
+ struct mlx5_flow_meter_info **next_fm,
+ bool *skip,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_meter_sub_policy *sub_policy;
+ struct mlx5_sub_policy_color_rule *color_rule;
+ uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
+ int ret = 0;
+ int i;
+
+ *next_fm = NULL;
+ *skip = false;
+ rte_spinlock_lock(&mtr_policy->sl);
+ if (!mtr_policy->is_hierarchy)
+ goto exit;
+ *next_fm = mlx5_flow_meter_hierarchy_next_meter(priv, mtr_policy, NULL);
+ if (!*next_fm) {
+ ret = rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Failed to find next meter in hierarchy.");
+ goto exit;
+ }
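+ /* Without a drop counter on the next meter, no extra tag rule is needed. */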
+ if (!(*next_fm)->drop_cnt) {
+ *skip = true;
+ goto exit;
+ }
+ sub_policy = mtr_policy->sub_policys[domain][0];
+ for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ if (mtr_policy->act_cnt[i].fate_action != MLX5_FLOW_FATE_MTR)
+ continue;
+ TAILQ_FOREACH(color_rule, &sub_policy->color_rules[i], next_port)
+ if (color_rule->src_port == src_port) {
+ *skip = true;
+ goto exit;
+ }
+ }
+exit:
+ rte_spinlock_unlock(&mtr_policy->sl);
+ return ret;
+}
+
/**
* Create the sub policy tag rule for all meters in hierarchy.
*
.reserved = 0,
};
uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
- int i;
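+ /* Bookkeeping for each level of the meter hierarchy walk. */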
+ struct {
+ struct mlx5_flow_meter_policy *fm_policy;
+ struct mlx5_flow_meter_info *next_fm;
+ struct mlx5_sub_policy_color_rule *tag_rule[MLX5_MTR_RTE_COLORS];
+ } fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} };
+ uint32_t fm_cnt = 0;
+ uint32_t i, j;
- mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
- MLX5_ASSERT(mtr_policy);
- if (!mtr_policy->is_hierarchy)
- return 0;
- next_fm = mlx5_flow_meter_find(priv,
- mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
- if (!next_fm) {
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "Failed to find next meter in hierarchy.");
- }
- if (!next_fm->drop_cnt)
- goto exit;
color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
- sub_policy = mtr_policy->sub_policys[domain][0];
- for (i = 0; i < RTE_COLORS; i++) {
- bool rule_exist = false;
- struct mlx5_meter_policy_action_container *act_cnt;
+ /* Collect all the meters that need the tag color rule. */
+ do {
+ bool skip = false;
- if (i >= RTE_COLOR_YELLOW)
- break;
- TAILQ_FOREACH(color_rule,
- &sub_policy->color_rules[i], next_port)
- if (color_rule->src_port == src_port) {
- rule_exist = true;
- break;
- }
- if (rule_exist)
- continue;
- color_rule = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(struct mlx5_sub_policy_color_rule),
- 0, SOCKET_ID_ANY);
- if (!color_rule)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "No memory to create tag color rule.");
- color_rule->src_port = src_port;
- attr.priority = i;
- next_policy = mlx5_flow_meter_policy_find(dev,
- next_fm->policy_id, NULL);
- MLX5_ASSERT(next_policy);
- next_sub_policy = next_policy->sub_policys[domain][0];
- tbl_data = container_of(next_sub_policy->tbl_rsc,
- struct mlx5_flow_tbl_data_entry, tbl);
- act_cnt = &mtr_policy->act_cnt[i];
- if (mtr_first) {
- acts.dv_actions[0] = next_fm->meter_action;
- acts.dv_actions[1] = act_cnt->modify_hdr->action;
- } else {
- acts.dv_actions[0] = act_cnt->modify_hdr->action;
- acts.dv_actions[1] = next_fm->meter_action;
- }
- acts.dv_actions[2] = tbl_data->jump.action;
- acts.actions_n = 3;
- if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
- next_fm = NULL;
- goto err_exit;
- }
- if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
- MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
- &attr, true, item,
- &color_rule->matcher, error)) {
- rte_flow_error_set(error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to create hierarchy meter matcher.");
+ mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
+ MLX5_ASSERT(mtr_policy);
+ if (mlx5_meter_hierarchy_skip_tag_rule(priv, mtr_policy, src_port,
+ &next_fm, &skip, error))
goto err_exit;
+ if (next_fm && !skip) {
+ fm_info[fm_cnt].fm_policy = mtr_policy;
+ fm_info[fm_cnt].next_fm = next_fm;
+ if (++fm_cnt >= MLX5_MTR_CHAIN_MAX_NUM) {
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Exceed max meter number in hierarchy.");
+ goto err_exit;
+ }
}
- if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
- (enum rte_color)i,
- color_rule->matcher->matcher_object,
- acts.actions_n, acts.dv_actions,
- true, item,
- &color_rule->rule, &attr)) {
- rte_flow_error_set(error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to create hierarchy meter rule.");
- goto err_exit;
+ fm = next_fm;
+ } while (fm);
+ /* Create tag color rules for all the collected meters. */
+ for (i = 0; i < fm_cnt; i++) {
+ void *mtr_action;
+
+ mtr_policy = fm_info[i].fm_policy;
+ rte_spinlock_lock(&mtr_policy->sl);
+ sub_policy = mtr_policy->sub_policys[domain][0];
+ for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
+ if (mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_MTR)
+ continue;
+ color_rule = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_sub_policy_color_rule),
+ 0, SOCKET_ID_ANY);
+ if (!color_rule) {
+ rte_spinlock_unlock(&mtr_policy->sl);
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "No memory to create tag color rule.");
+ goto err_exit;
+ }
+ color_rule->src_port = src_port;
+ next_fm = fm_info[i].next_fm;
+ if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
+ mlx5_free(color_rule);
+ rte_spinlock_unlock(&mtr_policy->sl);
+ goto err_exit;
+ }
+ fm_info[i].tag_rule[j] = color_rule;
+ TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port);
+ /* Prepare to create color rule. */
+ mtr_action = (next_fm->color_aware && j == RTE_COLOR_YELLOW) ?
+ next_fm->meter_action_y :
+ next_fm->meter_action_g;
+ next_policy = mlx5_flow_meter_policy_find(dev, next_fm->policy_id, NULL);
+ MLX5_ASSERT(next_policy);
+ next_sub_policy = next_policy->sub_policys[domain][0];
+ tbl_data = container_of(next_sub_policy->tbl_rsc,
+ struct mlx5_flow_tbl_data_entry, tbl);
+ if (mtr_first) {
+ acts.dv_actions[0] = mtr_action;
+ acts.dv_actions[1] = mtr_policy->act_cnt[j].modify_hdr->action;
+ } else {
+ acts.dv_actions[0] = mtr_policy->act_cnt[j].modify_hdr->action;
+ acts.dv_actions[1] = mtr_action;
+ }
+ acts.dv_actions[2] = tbl_data->jump.action;
+ acts.actions_n = 3;
+ if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
+ MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
+ &attr, true, item, &color_rule->matcher, error)) {
+ rte_spinlock_unlock(&mtr_policy->sl);
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to create hierarchy meter matcher.");
+ goto err_exit;
+ }
+ if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)j,
+ color_rule->matcher->matcher_object,
+ acts.actions_n, acts.dv_actions,
+ true, item, &color_rule->rule, &attr)) {
+ rte_spinlock_unlock(&mtr_policy->sl);
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to create hierarchy meter rule.");
+ goto err_exit;
+ }
}
- TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
- color_rule, next_port);
+ rte_spinlock_unlock(&mtr_policy->sl);
}
-exit:
- /**
- * Recursive call to iterate all meters in hierarchy and
- * create needed rules.
- */
- return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
- src_port, item, error);
+ return 0;
err_exit:
- if (color_rule) {
- if (color_rule->rule)
- mlx5_flow_os_destroy_flow(color_rule->rule);
- if (color_rule->matcher) {
- struct mlx5_flow_tbl_data_entry *tbl =
- container_of(color_rule->matcher->tbl,
- typeof(*tbl), tbl);
- mlx5_list_unregister(tbl->matchers,
- &color_rule->matcher->entry);
+ for (i = 0; i < fm_cnt; i++) {
+ mtr_policy = fm_info[i].fm_policy;
+ rte_spinlock_lock(&mtr_policy->sl);
+ sub_policy = mtr_policy->sub_policys[domain][0];
+ for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
+ color_rule = fm_info[i].tag_rule[j];
+ if (!color_rule)
+ continue;
+ if (color_rule->rule)
+ mlx5_flow_os_destroy_flow(color_rule->rule);
+ if (color_rule->matcher) {
+ struct mlx5_flow_tbl_data_entry *tbl =
+ container_of(color_rule->matcher->tbl, typeof(*tbl), tbl);
+ mlx5_list_unregister(tbl->matchers, &color_rule->matcher->entry);
+ }
+ if (fm_info[i].next_fm)
+ mlx5_flow_meter_detach(priv, fm_info[i].next_fm);
+ TAILQ_REMOVE(&sub_policy->color_rules[j], color_rule, next_port);
+ mlx5_free(color_rule);
}
- mlx5_free(color_rule);
+ rte_spinlock_unlock(&mtr_policy->sl);
}
- if (next_fm)
- mlx5_flow_meter_detach(priv, next_fm);
return -rte_errno;
}
goto err;
dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
- ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
- &matcher);
+ ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+ tbl->obj, &matcher);
if (ret)
goto err;
__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
0, 0, 0, NULL);
if (!tbl)
goto err;
- dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
if (!dcs)
goto err;
ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
goto err;
dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
- ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
- &matcher);
+ ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+ tbl->obj, &matcher);
if (ret)
goto err;
__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
*/
static int
flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
- uint64_t *pkts, uint64_t *bytes)
+ uint64_t *pkts, uint64_t *bytes, void **action)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter *cnt;
uint64_t inn_pkts, inn_bytes;
int ret;
- if (!priv->config.devx)
+ if (!priv->sh->cdev->config.devx)
return -1;
ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
if (ret)
return -1;
cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
+ if (cnt && action)
+ *action = cnt->action;
+
*pkts = inn_pkts - cnt->hits;
*bytes = inn_bytes - cnt->bytes;
if (clear) {
* @return
* 0 on success, otherwise negative errno value.
*/
-static int
+int
flow_dv_action_validate(struct rte_eth_dev *dev,
const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
"Indirect age action not supported");
return flow_dv_validate_action_age(0, action, dev, err);
case RTE_FLOW_ACTION_TYPE_COUNT:
- /*
- * There are two mechanisms to share the action count.
- * The old mechanism uses the shared field to share, while the
- * new mechanism uses the indirect action API.
- * This validation comes to make sure that the two mechanisms
- * are not combined.
- */
- if (is_shared_action_count(action))
- return rte_flow_error_set(err, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "Mix shared and indirect counter is not supported");
- return flow_dv_validate_action_count(dev, true, 0, err);
+ return flow_dv_validate_action_count(dev, true, 0, NULL, err);
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
if (!priv->sh->ct_aso_en)
return rte_flow_error_set(err, ENOTSUP,
flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
const struct rte_flow_action_rss *r2)
{
- if (!r1 || !r2)
+ if (r1 == NULL || r2 == NULL)
return 0;
- if (r1->func != r2->func || r1->level != r2->level ||
- r1->types != r2->types || r1->key_len != r2->key_len ||
- memcmp(r1->key, r2->key, r1->key_len))
+ if (!(r1->level <= 1 && r2->level <= 1) &&
+ !(r1->level > 1 && r2->level > 1))
+ return 1;
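+ /* Types 0 and RTE_ETH_RSS_IP are both treated as the default type set. */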
+ if (r1->types != r2->types &&
+ !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
+ (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
return 1;
+ if (r1->key || r2->key) {
+ const void *key1 = r1->key ? r1->key : rss_hash_default_key;
+ const void *key2 = r2->key ? r2->key : rss_hash_default_key;
+
+ if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
+ return 1;
+ }
return 0;
}
RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
NULL,
"Multiple fate actions not supported.");
+ *hierarchy_domain = 0;
+ fm = mlx5_flow_meter_find(priv, meter_id, NULL);
while (true) {
- fm = mlx5_flow_meter_find(priv, meter_id, NULL);
if (!fm)
return -rte_mtr_error_set(error, EINVAL,
RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
return -rte_mtr_error_set(error, EINVAL,
RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
"Non termination meter not supported in hierarchy.");
+ if (!fm->shared)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Only shared meter supported in hierarchy.");
policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
MLX5_ASSERT(policy);
- if (!policy->is_hierarchy) {
+ /**
+ * Only inherit the supported domains of the first meter in the
+ * hierarchy.
+ * One meter supports at least one domain.
+ */
+ if (!*hierarchy_domain) {
if (policy->transfer)
*hierarchy_domain |=
MLX5_MTR_DOMAIN_TRANSFER_BIT;
MLX5_MTR_DOMAIN_INGRESS_BIT;
if (policy->egress)
*hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
+ }
+ if (!policy->is_hierarchy) {
*is_rss = policy->is_rss;
break;
}
- meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
+ rte_spinlock_lock(&policy->sl);
+ fm = mlx5_flow_meter_hierarchy_next_meter(priv, policy, NULL);
+ rte_spinlock_unlock(&policy->sl);
if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
return -rte_mtr_error_set(error, EINVAL,
RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
struct rte_mtr_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *dev_conf = &priv->config;
+ struct mlx5_sh_config *dev_conf = &priv->sh->config;
const struct rte_flow_action *act;
uint64_t action_flags[RTE_COLORS] = {0};
int actions_n;
uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
uint8_t hierarchy_domain = 0;
const struct rte_flow_action_meter *mtr;
+ const struct rte_flow_action_meter *next_mtr = NULL;
bool def_green = false;
bool def_yellow = false;
const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
- if (!priv->config.dv_esw_en)
+ if (!dev_conf->dv_esw_en)
def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
*domain_bitmap = def_domain;
/* Red color could only support DROP action. */
*policy_mode = MLX5_MTR_POLICY_MODE_OG;
} else if (def_green && !def_yellow) {
*policy_mode = MLX5_MTR_POLICY_MODE_OY;
+ } else {
+ *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
}
/* Set to empty string in case of NULL pointer access by user. */
flow_err.message = "";
NULL, "too many actions");
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_PORT_ID:
- if (!priv->config.dv_esw_en)
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ if (!dev_conf->dv_esw_en)
return -rte_mtr_error_set(error,
ENOTSUP,
RTE_MTR_ERROR_TYPE_METER_POLICY,
++actions_n;
action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
break;
- /*
- * Only the last meter in the hierarchy will support
- * the YELLOW color steering. Then in the meter policy
- * actions list, there should be no other meter inside.
- */
case RTE_FLOW_ACTION_TYPE_METER:
- if (i != RTE_COLOR_GREEN)
- return -rte_mtr_error_set(error,
- ENOTSUP,
- RTE_MTR_ERROR_TYPE_METER_POLICY,
- NULL,
- "Meter hierarchy only supports GREEN color.");
- if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
- return -rte_mtr_error_set(error,
- ENOTSUP,
- RTE_MTR_ERROR_TYPE_METER_POLICY,
- NULL,
- "No yellow policy should be provided in meter hierarchy.");
mtr = act->conf;
+ if (next_mtr && next_mtr->mtr_id != mtr->mtr_id)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+ "Green and Yellow must use the same meter.");
ret = flow_dv_validate_policy_mtr_hierarchy(dev,
mtr->mtr_id,
action_flags[i],
++actions_n;
action_flags[i] |=
MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
+ next_mtr = mtr;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ ret = flow_dv_validate_action_modify_field(dev,
+ action_flags[i], act, attr, &flow_err);
+ if (ret < 0)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, flow_err.message ?
+ flow_err.message :
+ "Modify field action validate check fail");
+ ++actions_n;
+ action_flags[i] |= MLX5_FLOW_ACTION_MODIFY_FIELD;
break;
default:
return -rte_mtr_error_set(error, ENOTSUP,
"Doesn't support optional action");
}
}
- if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID)
+ if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
- else if ((action_flags[i] &
+ } else if ((action_flags[i] &
(MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
- (action_flags[i] & MLX5_FLOW_ACTION_MARK))
+ (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
/*
* Only support MLX5_XMETA_MODE_LEGACY
* so MARK action is only in ingress domain.
*/
domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
- else if (action_flags[i] &
- MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
- domain_color[i] = hierarchy_domain;
- else
+ } else {
domain_color[i] = def_domain;
+ if (action_flags[i] &&
+ !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ domain_color[i] &=
+ ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
+ }
+ if (action_flags[i] &
+ MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
+ domain_color[i] &= hierarchy_domain;
/*
* Non-termination actions only support NIC Tx domain.
* The adjustment should be skipped when there is no
}
}
}
+ if (next_mtr && *policy_mode == MLX5_MTR_POLICY_MODE_ALL) {
+ if (!(action_flags[RTE_COLOR_GREEN] & action_flags[RTE_COLOR_YELLOW] &
+ MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY))
+ return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "Meter hierarchy supports meter action only.");
+ }
/* If both colors have RSS, the attributes should be the same. */
if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
rss_color[RTE_COLOR_YELLOW]))
return 0;
}
+/**
+ * Discover the number of available flow priorities
+ * by trying to create a flow with the highest priority value
+ * for each possible number.
+ *
+ * @param[in] dev
+ * Ethernet device.
+ * @param[in] vprio
+ * List of possible number of available priorities.
+ * @param[in] vprio_n
+ * Size of @p vprio array.
+ * @return
+ * On success, number of available flow priorities.
+ * On failure, a negative errno-style code and rte_errno is set.
+ */
+static int
+flow_dv_discover_priorities(struct rte_eth_dev *dev,
+ const uint16_t *vprio, int vprio_n)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item item = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &eth,
+ .mask = &eth,
+ };
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf),
+ },
+ };
+ union mlx5_flow_tbl_key tbl_key;
+ struct mlx5_flow flow;
+ void *action;
+ struct rte_flow_error error;
+ uint8_t misc_mask;
+ int i, err, ret = -ENOTSUP;
+
+ /*
+ * Prepare a flow with a catch-all pattern and a drop action.
+ * Use drop queue, because shared drop action may be unavailable.
+ */
+ action = priv->drop_queue.hrxq->action;
+ if (action == NULL) {
+ DRV_LOG(ERR, "Priority discovery requires a drop action");
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ memset(&flow, 0, sizeof(flow));
+ flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
+ if (flow.handle == NULL) {
+ DRV_LOG(ERR, "Cannot create flow handle");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ flow.ingress = true;
+ flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ flow.dv.actions[0] = action;
+ flow.dv.actions_n = 1;
+ memset(ð, 0, sizeof(eth));
+ flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
+ &item, /* inner */ false, /* group */ 0);
+ matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
+ for (i = 0; i < vprio_n; i++) {
+ /* Configure the next proposed maximum priority. */
+ matcher.priority = vprio[i] - 1;
+ memset(&tbl_key, 0, sizeof(tbl_key));
+ err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
+ /* tunnel */ NULL,
+ /* group */ 0,
+ &error);
+ if (err != 0) {
+ /* This action is pure SW and must always succeed. */
+ DRV_LOG(ERR, "Cannot register matcher");
+ ret = -rte_errno;
+ break;
+ }
+ /* Try to apply the flow to HW. */
+ misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
+ __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
+ err = mlx5_flow_os_create_flow
+ (flow.handle->dvh.matcher->matcher_object,
+ (void *)&flow.dv.value, flow.dv.actions_n,
+ flow.dv.actions, &flow.handle->drv_flow);
+ if (err == 0) {
+ claim_zero(mlx5_flow_os_destroy_flow
+ (flow.handle->drv_flow));
+ flow.handle->drv_flow = NULL;
+ }
+ claim_zero(flow_dv_matcher_release(dev, flow.handle));
+ if (err != 0)
+ break;
+ ret = vprio[i];
+ }
+ mlx5_ipool_free(pool, flow.handle_idx);
+ /* Set rte_errno if no expected priority value matched. */
+ if (ret < 0)
+ rte_errno = -ret;
+ return ret;
+}
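+
+/*
+ * A usage sketch: the common layer is expected to probe with a fixed
+ * candidate list and keep the largest value for which the catch-all
+ * flow still programs successfully, e.g.:
+ *
+ *	static const uint16_t vprio[] = { 8, 16 };
+ *	int n = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
+ */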
+
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.validate = flow_dv_validate,
.prepare = flow_dv_prepare,
.action_update = flow_dv_action_update,
.action_query = flow_dv_action_query,
.sync_domain = flow_dv_sync_domain,
+ .discover_priorities = flow_dv_discover_priorities,
+ .item_create = flow_dv_item_create,
+ .item_release = flow_dv_item_release,
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-