#include <rte_common.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
- uint32_t encap_decap_idx);
+ uint32_t encap_decap_idx);
static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
+static int
+flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
+ uint32_t rix_jump);
+
/**
* Initialize flow attributes structure according to flow items' types.
*
(int)dcopy->offset < 0 ? off_b : dcopy->offset;
/* Convert entire record to big-endian format. */
actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
+ ++dcopy;
} else {
MLX5_ASSERT(item->spec);
data = flow_dv_fetch_field((const uint8_t *)item->spec +
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many items to modify");
MLX5_ASSERT(conf->id != REG_NON);
- MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
+ MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
actions[i] = (struct mlx5_modification_cmd) {
.action_type = MLX5_MODIFICATION_TYPE_SET,
.field = reg_to_field[conf->id],
MLX5_MODIFICATION_TYPE_SET, error);
}
+static void
+mlx5_flow_field_id_to_modify_info
+ (const struct rte_flow_action_modify_data *data,
+ struct field_modify_info *info,
+ uint32_t *mask, uint32_t *value, uint32_t width,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ uint32_t idx = 0;
+ switch (data->field) {
+ case RTE_FLOW_FIELD_START:
+ /* not supported yet */
+ MLX5_ASSERT(false);
+ break;
+ case RTE_FLOW_FIELD_MAC_DST:
+ if (mask) {
+ if (data->offset < 32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DMAC_47_16};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ info[idx] = (struct field_modify_info){2, 4 * idx,
+ MLX5_MODI_OUT_DMAC_15_0};
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ } else {
+ if (data->offset < 32)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DMAC_47_16};
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_DMAC_15_0};
+ }
+ break;
+ case RTE_FLOW_FIELD_MAC_SRC:
+ if (mask) {
+ if (data->offset < 32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SMAC_47_16};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ info[idx] = (struct field_modify_info){2, 4 * idx,
+ MLX5_MODI_OUT_SMAC_15_0};
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ } else {
+ if (data->offset < 32)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SMAC_47_16};
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_SMAC_15_0};
+ }
+ break;
+ case RTE_FLOW_FIELD_VLAN_TYPE:
+ /* not supported yet */
+ break;
+ case RTE_FLOW_FIELD_VLAN_ID:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_FIRST_VID};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x00000fff >>
+ (12 - width));
+ break;
+ case RTE_FLOW_FIELD_MAC_TYPE:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_ETHERTYPE};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV4_DSCP:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_IP_DSCP};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000003f >>
+ (6 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV4_TTL:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_IPV4_TTL};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x000000ff >>
+ (8 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV4_SRC:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV4};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV4_DST:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV4};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV6_DSCP:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_IP_DSCP};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000003f >>
+ (6 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x000000ff >>
+ (8 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV6_SRC:
+ if (mask) {
+ if (data->offset < 32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_127_96};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ if (data->offset < 64) {
+ info[idx] = (struct field_modify_info){4,
+ 4 * idx,
+ MLX5_MODI_OUT_SIPV6_95_64};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ if (data->offset < 96) {
+ info[idx] = (struct field_modify_info){4,
+ 8 * idx,
+ MLX5_MODI_OUT_SIPV6_63_32};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ info[idx] = (struct field_modify_info){4, 12 * idx,
+ MLX5_MODI_OUT_SIPV6_31_0};
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ } else {
+ if (data->offset < 32)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_127_96};
+ if (data->offset < 64)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_95_64};
+ if (data->offset < 96)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_63_32};
+ if (data->offset < 128)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_31_0};
+ }
+ break;
+ case RTE_FLOW_FIELD_IPV6_DST:
+ if (mask) {
+ if (data->offset < 32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_127_96};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ if (data->offset < 64) {
+ info[idx] = (struct field_modify_info){4,
+ 4 * idx,
+ MLX5_MODI_OUT_DIPV6_95_64};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ if (data->offset < 96) {
+ info[idx] = (struct field_modify_info){4,
+ 8 * idx,
+ MLX5_MODI_OUT_DIPV6_63_32};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ info[idx] = (struct field_modify_info){4, 12 * idx,
+ MLX5_MODI_OUT_DIPV6_31_0};
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ } else {
+ if (data->offset < 32)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_127_96};
+ if (data->offset < 64)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_95_64};
+ if (data->offset < 96)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_63_32};
+ if (data->offset < 128)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_31_0};
+ }
+ break;
+ case RTE_FLOW_FIELD_TCP_PORT_SRC:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_TCP_SPORT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_TCP_PORT_DST:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_TCP_DPORT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_TCP_SEQ_NUM:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_TCP_SEQ_NUM};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_TCP_ACK_NUM:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_TCP_ACK_NUM};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_TCP_FLAGS:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_TCP_FLAGS};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000003f >>
+ (6 - width));
+ break;
+ case RTE_FLOW_FIELD_UDP_PORT_SRC:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_UDP_SPORT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_UDP_PORT_DST:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_UDP_DPORT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_VXLAN_VNI:
+ /* not supported yet */
+ break;
+ case RTE_FLOW_FIELD_GENEVE_VNI:
+ /* not supported yet */
+ break;
+ case RTE_FLOW_FIELD_GTP_TEID:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_GTP_TEID};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_TAG:
+ {
+ int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
+ data->level, error);
+ if (reg < 0)
+ return;
+ MLX5_ASSERT(reg != REG_NON);
+ MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
+ info[idx] = (struct field_modify_info){4, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ }
+ break;
+ case RTE_FLOW_FIELD_MARK:
+ {
+ int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
+ 0, error);
+ if (reg < 0)
+ return;
+ MLX5_ASSERT(reg != REG_NON);
+ MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
+ info[idx] = (struct field_modify_info){4, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ }
+ break;
+ case RTE_FLOW_FIELD_META:
+ {
+ int reg = flow_dv_get_metadata_reg(dev, attr, error);
+ if (reg < 0)
+ return;
+ MLX5_ASSERT(reg != REG_NON);
+ MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
+ info[idx] = (struct field_modify_info){4, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ }
+ break;
+ case RTE_FLOW_FIELD_POINTER:
+ for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
+ if (mask[idx]) {
+ memcpy(&value[idx],
+ (void *)(uintptr_t)data->value, 32);
+ value[idx] = rte_cpu_to_be_32(value[idx]);
+ break;
+ }
+ }
+ break;
+ case RTE_FLOW_FIELD_VALUE:
+ for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
+ if (mask[idx]) {
+ value[idx] =
+ rte_cpu_to_be_32((uint32_t)data->value);
+ break;
+ }
+ }
+ break;
+ default:
+ MLX5_ASSERT(false);
+ break;
+ }
+}
+
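/*
 * Worked example (editorial illustration, not part of the patch): every
 * mask above follows one pattern - for the 'width' low-order bits of a
 * field with full mask M and bit size S, the match mask is
 * rte_cpu_to_be_32(M >> (S - width)). E.g. width = 12 on VLAN ID yields
 * 0x00000fff >> (12 - 12) = 0x00000fff, while a 48-bit MAC is covered by
 * a 32-bit segment (0xffffffff) plus a 16-bit remainder segment with mask
 * 0x0000ffff >> (16 - remaining width).
 */
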
+/**
+ * Convert modify_field action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_field
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_modify_field *conf =
+ (const struct rte_flow_action_modify_field *)(action->conf);
+ struct rte_flow_item item;
+ struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
+ {0, 0, 0} };
+ struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
+ {0, 0, 0} };
+ uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
+ uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
+ uint32_t type;
+
+ if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
+ conf->src.field == RTE_FLOW_FIELD_VALUE) {
+ type = MLX5_MODIFICATION_TYPE_SET;
+ /** For SET fill the destination field (field) first. */
+ mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
+ value, conf->width, dev, attr, error);
+ /** Then copy immediate value from source as per mask. */
+ mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
+ value, conf->width, dev, attr, error);
+ item.spec = &value;
+ } else {
+ type = MLX5_MODIFICATION_TYPE_COPY;
+ /** For COPY fill the destination field (dcopy) without mask. */
+ mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
+ value, conf->width, dev, attr, error);
+ /** Then construct the source field (field) with mask. */
+ mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
+ value, conf->width, dev, attr, error);
+ }
+ item.mask = &mask;
+ return flow_dv_convert_modify_action(&item,
+ field, dcopy, resource, type, error);
+}
+
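/*
 * Usage sketch (hypothetical application code, shown for illustration):
 * the conversion above is reached with a MODIFY_FIELD action such as the
 * following, which writes the immediate value 64 into the IPv4 TTL.
 *
 *	struct rte_flow_action_modify_field ttl_conf = {
 *		.operation = RTE_FLOW_MODIFY_SET,
 *		.dst = { .field = RTE_FLOW_FIELD_IPV4_TTL },
 *		.src = { .field = RTE_FLOW_FIELD_VALUE, .value = 64 },
 *		.width = 8,
 *	};
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
 *		  .conf = &ttl_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
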
/**
* Validate MARK item.
*
"isn't supported");
if (reg != REG_A)
nic_mask.data = priv->sh->dv_meta_mask;
- } else if (attr->transfer) {
- return rte_flow_error_set(error, ENOTSUP,
+ } else {
+ if (attr->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"extended metadata feature "
"should be enabled when "
"meta item is requested "
"with e-switch mode ");
+ if (attr->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "match on metadata for ingress "
+ "is not supported in legacy "
+ "metadata mode");
}
if (!mask)
mask = &rte_flow_item_meta_mask;
MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
+/**
+ * Validate GTP PSC item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] last_item
+ * Previous validated item in the pattern items.
+ * @param[in] gtp_item
+ * Previous GTP item specification.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
+ uint64_t last_item,
+ const struct rte_flow_item *gtp_item,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_gtp *gtp_spec;
+ const struct rte_flow_item_gtp *gtp_mask;
+ const struct rte_flow_item_gtp_psc *spec;
+ const struct rte_flow_item_gtp_psc *mask;
+ const struct rte_flow_item_gtp_psc nic_mask = {
+ .pdu_type = 0xFF,
+ .qfi = 0xFF,
+ };
+
+ if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GTP PSC item must be preceded with GTP item");
+ gtp_spec = gtp_item->spec;
+ gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
+ /* GTP spec is present and the E flag is requested to match zero. */
+ if (gtp_spec &&
+ (gtp_mask->v_pt_rsv_flags &
+ ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GTP E flag must be 1 to match GTP PSC");
+ /* Check the flow is not created in group zero. */
+ if (!attr->transfer && !attr->group)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "GTP PSC is not supported for group 0");
+ /* The GTP PSC spec is optional; nothing more to check without it. */
+ if (!item->spec)
+ return 0;
+ spec = item->spec;
+ mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
+ if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "PDU type should be smaller than 16");
+ return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_gtp_psc),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
+}
+
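/*
 * Pattern sketch (hypothetical application code, shown for illustration):
 * a GTP PSC item accepted by the checks above follows a GTP item with the
 * E (extension header present) flag set, in a non-zero group, e.g.:
 *
 *	struct rte_flow_item_gtp gtp_spec = {
 *		.v_pt_rsv_flags = 0x04,
 *		.teid = RTE_BE32(1234),
 *	};
 *	struct rte_flow_item_gtp_psc psc_spec = {
 *		.pdu_type = 1,
 *		.qfi = 9,
 *	};
 *
 * where 0x04 is the E flag bit in v_pt_rsv_flags, the pattern is
 * ETH / IPV4 / UDP / GTP / GTP_PSC / END, and attr->group > 0 (or
 * attr->transfer is set).
 */
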
/**
* Validate IPV4 item.
* Use existing validation function mlx5_flow_validate_item_ipv4(), and
const struct rte_flow_action_mark *mark = action->conf;
int ret;
+ if (is_tunnel_offload_active(dev))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "no mark action "
+ "if tunnel offload active");
/* Fall back if no extended metadata register support. */
if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
return mlx5_flow_validate_action_mark(action, action_flags,
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to the action structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
* @param[out] error
* Pointer to error structure.
*
*/
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ uint64_t action_flags,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_count *count;
if (!priv->config.devx)
goto notsup_err;
+ if (action_flags & MLX5_FLOW_ACTION_COUNT)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "duplicate count actions set");
+ count = (const struct rte_flow_action_count *)action->conf;
+ if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
+ !priv->sh->flow_hit_aso_en)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "old age and shared count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
return 0;
#endif
* Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the action structure.
+ * @param[in] item_flags
+ * Holds the items detected.
* @param[in] attr
* Pointer to flow attributes
* @param[out] error
static int
flow_dv_validate_action_decap(struct rte_eth_dev *dev,
uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const uint64_t item_flags,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"decap action for VF representor "
"not supported on NIC table");
+ if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
+ !(item_flags & MLX5_FLOW_LAYER_VXLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "VXLAN item should be present for VXLAN decap");
return 0;
}
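/*
 * Illustration (testpmd-style, hypothetical): with the check above, a rule
 * like
 *	flow create 0 ingress pattern eth / ipv4 / udp / vxlan / end
 *		actions vxlan_decap / queue index 0 / end
 * still validates, while the same actions without the vxlan pattern item
 * are now rejected with ENOTSUP.
 */
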
* Holds the actions detected until now.
* @param[out] actions_n
* pointer to the number of actions counter.
+ * @param[in] action
+ * Pointer to the action structure.
+ * @param[in] item_flags
+ * Holds the items detected.
* @param[out] error
* Pointer to error structure.
*
const struct rte_flow_action_raw_decap *decap,
const struct rte_flow_action_raw_encap *encap,
const struct rte_flow_attr *attr, uint64_t *action_flags,
- int *actions_n, struct rte_flow_error *error)
+ int *actions_n, const struct rte_flow_action *action,
+ uint64_t item_flags, struct rte_flow_error *error)
{
const struct mlx5_priv *priv = dev->data->dev_private;
int ret;
"encap combination");
}
if (decap) {
- ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
- error);
+ ret = flow_dv_validate_action_decap(dev, *action_flags, action,
+ item_flags, attr, error);
if (ret < 0)
return ret;
*action_flags |= MLX5_FLOW_ACTION_DECAP;
"cannot create action");
return NULL;
}
+ cache->idx = idx;
return &cache->entry;
}
"cannot create push vlan action");
return NULL;
}
+ cache->idx = idx;
return &cache->entry;
}
(dev, &res, dev_flow, error);
}
-static int fdb_mirror;
-
/**
* Validate the modify-header actions.
*
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't have encap action before"
" modify action");
- if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't support sample action before"
- " modify action for E-Switch"
- " mirroring");
return 0;
}
return ret;
}
+static int
+mlx5_flow_item_field_width(enum rte_flow_field_id field)
+{
+ switch (field) {
+ case RTE_FLOW_FIELD_START:
+ return 32;
+ case RTE_FLOW_FIELD_MAC_DST:
+ case RTE_FLOW_FIELD_MAC_SRC:
+ return 48;
+ case RTE_FLOW_FIELD_VLAN_TYPE:
+ return 16;
+ case RTE_FLOW_FIELD_VLAN_ID:
+ return 12;
+ case RTE_FLOW_FIELD_MAC_TYPE:
+ return 16;
+ case RTE_FLOW_FIELD_IPV4_DSCP:
+ return 6;
+ case RTE_FLOW_FIELD_IPV4_TTL:
+ return 8;
+ case RTE_FLOW_FIELD_IPV4_SRC:
+ case RTE_FLOW_FIELD_IPV4_DST:
+ return 32;
+ case RTE_FLOW_FIELD_IPV6_DSCP:
+ return 6;
+ case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
+ return 8;
+ case RTE_FLOW_FIELD_IPV6_SRC:
+ case RTE_FLOW_FIELD_IPV6_DST:
+ return 128;
+ case RTE_FLOW_FIELD_TCP_PORT_SRC:
+ case RTE_FLOW_FIELD_TCP_PORT_DST:
+ return 16;
+ case RTE_FLOW_FIELD_TCP_SEQ_NUM:
+ case RTE_FLOW_FIELD_TCP_ACK_NUM:
+ return 32;
+ case RTE_FLOW_FIELD_TCP_FLAGS:
+ return 6;
+ case RTE_FLOW_FIELD_UDP_PORT_SRC:
+ case RTE_FLOW_FIELD_UDP_PORT_DST:
+ return 16;
+ case RTE_FLOW_FIELD_VXLAN_VNI:
+ case RTE_FLOW_FIELD_GENEVE_VNI:
+ return 24;
+ case RTE_FLOW_FIELD_GTP_TEID:
+ case RTE_FLOW_FIELD_TAG:
+ return 32;
+ case RTE_FLOW_FIELD_MARK:
+ return 24;
+ case RTE_FLOW_FIELD_META:
+ case RTE_FLOW_FIELD_POINTER:
+ case RTE_FLOW_FIELD_VALUE:
+ return 32;
+ default:
+ MLX5_ASSERT(false);
+ }
+ return 0;
+}
+
+/**
+ * Validate the generic modify field actions.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * Number of header fields to modify (0 or more) on success,
+ * a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
+ const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ const struct rte_flow_action_modify_field *action_modify_field =
+ action->conf;
+ uint32_t dst_width =
+ mlx5_flow_item_field_width(action_modify_field->dst.field);
+ uint32_t src_width =
+ mlx5_flow_item_field_width(action_modify_field->src.field);
+
+ ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+ if (ret)
+ return ret;
+
+ if (action_modify_field->width == 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "no bits are requested to be modified");
+ else if (action_modify_field->width > dst_width ||
+ action_modify_field->width > src_width)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot modify more bits than"
+ " the width of a field");
+ if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
+ action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
+ if ((action_modify_field->dst.offset +
+ action_modify_field->width > dst_width) ||
+ (action_modify_field->dst.offset % 32))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "destination offset is too big"
+ " or not aligned to 4 bytes");
+ if (action_modify_field->dst.level &&
+ action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot modify inner headers");
+ }
+ if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
+ action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
+ if (!attr->transfer && !attr->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "modify field action "
+ "is not supported for group 0");
+ if ((action_modify_field->src.offset +
+ action_modify_field->width > src_width) ||
+ (action_modify_field->src.offset % 32))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "source offset is too big"
+ " or not aligned to 4 bytes");
+ if (action_modify_field->src.level &&
+ action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot copy from inner headers");
+ }
+ if (action_modify_field->dst.field ==
+ action_modify_field->src.field)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "source and destination fields"
+ " cannot be the same");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
+ action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "immediate value or a pointer to it"
+ " cannot be used as a destination");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_START)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "modifications of an arbitrary"
+ " place in a packet is not supported");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "modifications of the 802.1Q Tag"
+ " Identifier is not supported");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "modifications of the VXLAN Network"
+ " Identifier is not supported");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "modifications of the GENEVE Network"
+ " Identifier is not supported");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_MARK) {
+ if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ !mlx5_flow_ext_mreg_supported(dev))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "cannot modify mark without extended"
+ " metadata register support");
+ }
+ if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "add and sub operations"
+ " are not supported");
+ return (action_modify_field->width / 32) +
+ !!(action_modify_field->width % 32);
+}
+
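/*
 * Worked example (editorial illustration): the return value counts 32-bit
 * modify-header commands. A 48-bit MAC destination with width = 48 needs
 * 48 / 32 + !!(48 % 32) = 1 + 1 = 2 commands; an 8-bit TTL with width = 8
 * needs 0 + 1 = 1 command.
 */
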
/**
* Validate jump action.
*
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"jump with meter not support");
- if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "E-Switch mirroring can't support"
- " Sample action and jump action in"
- " same flow now");
if (!action->conf)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
/**
* Validate the sample action.
*
- * @param[in] action_flags
+ * @param[in, out] action_flags
* Holds the actions detected until now.
* @param[in] action
* Pointer to the sample action.
* Pointer to the Ethernet device structure.
* @param[in] attr
* Attributes of flow that includes this action.
+ * @param[in] item_flags
+ * Holds the items detected.
+ * @param[in] rss
+ * Pointer to the RSS action.
+ * @param[out] sample_rss
+ * Pointer to the RSS action in sample action list.
+ * @param[out] count
+ * Pointer to the COUNT action in sample action list.
+ * @param[out] fdb_mirror_limit
+ * Pointer to the FDB mirror limitation flag.
* @param[out] error
* Pointer to error structure.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_sample(uint64_t action_flags,
+flow_dv_validate_action_sample(uint64_t *action_flags,
const struct rte_flow_action *action,
struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
+ uint64_t item_flags,
+ const struct rte_flow_action_rss *rss,
+ const struct rte_flow_action_rss **sample_rss,
+ const struct rte_flow_action_count **count,
+ int *fdb_mirror_limit,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
uint16_t queue_index = 0xFFFF;
int actions_n = 0;
int ret;
- fdb_mirror = 0;
if (!sample)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"sample action not supported");
- if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"Multiple sample actions not "
"supported");
- if (action_flags & MLX5_FLOW_ACTION_METER)
+ if (*action_flags & MLX5_FLOW_ACTION_METER)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, meter should "
"be after sample action");
- if (action_flags & MLX5_FLOW_ACTION_JUMP)
+ if (*action_flags & MLX5_FLOW_ACTION_JUMP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, jump should "
sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
++actions_n;
break;
- case RTE_FLOW_ACTION_TYPE_MARK:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ *sample_rss = act->conf;
+ ret = mlx5_flow_validate_action_rss(act,
+ sub_action_flags,
+ dev, attr,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ if (rss && *sample_rss &&
+ ((*sample_rss)->level != rss->level ||
+ (*sample_rss)->types != rss->types))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Can't use the different RSS types "
+ "or level in the same flow");
+ if (*sample_rss != NULL && (*sample_rss)->queue_num)
+ queue_index = (*sample_rss)->queue[0];
+ sub_action_flags |= MLX5_FLOW_ACTION_RSS;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
ret = flow_dv_validate_action_mark(dev, act,
sub_action_flags,
attr, error);
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = flow_dv_validate_action_count(dev, error);
+ ret = flow_dv_validate_action_count
+ (dev, act,
+ *action_flags | sub_action_flags,
+ error);
if (ret < 0)
return ret;
+ *count = act->conf;
sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
+ *action_flags |= MLX5_FLOW_ACTION_COUNT;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
ret = flow_dv_validate_action_raw_encap_decap
(dev, NULL, act->conf, attr, &sub_action_flags,
- &actions_n, error);
+ &actions_n, action, item_flags, error);
+ if (ret < 0)
+ return ret;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = flow_dv_validate_action_l2_encap(dev,
+ sub_action_flags,
+ act, attr,
+ error);
if (ret < 0)
return ret;
+ sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
++actions_n;
break;
default:
}
}
if (attr->ingress && !attr->transfer) {
- if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
+ if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
+ MLX5_FLOW_ACTION_RSS)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"E-Switch doesn't support "
"any optional action "
"for sampling");
- fdb_mirror = 1;
if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"unsupported action QUEUE");
+ if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action QUEUE");
if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"E-Switch must has a dest "
"port for mirroring");
+ if (!priv->config.hca_attr.reg_c_preserve &&
+ priv->representor_id != -1)
+ *fdb_mirror_limit = 1;
}
/* Continue validation for Xcap actions. */
if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t priority_max = priv->config.flow_prio - 1;
+ uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
int ret = 0;
#ifndef HAVE_MLX5DV_DR
if (!table)
ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
- if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
- attributes->priority >= priority_max)
+ if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
+ attributes->priority > lowest_priority)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
NULL,
uint16_t ether_type = 0;
int actions_n = 0;
uint8_t item_ipv6_proto = 0;
+ int fdb_mirror_limit = 0;
+ int modify_after_mirror = 0;
+ const struct rte_flow_item *geneve_item = NULL;
const struct rte_flow_item *gre_item = NULL;
+ const struct rte_flow_item *gtp_item = NULL;
const struct rte_flow_action_raw_decap *decap;
const struct rte_flow_action_raw_encap *encap;
- const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action_rss *rss = NULL;
+ const struct rte_flow_action_rss *sample_rss = NULL;
+ const struct rte_flow_action_count *count = NULL;
+ const struct rte_flow_action_count *sample_count = NULL;
const struct rte_flow_item_tcp nic_tcp_mask = {
.hdr = {
.tcp_flags = 0xFF,
struct mlx5_dev_config *dev_conf = &priv->config;
uint16_t queue_index = 0xFFFF;
const struct rte_flow_item_vlan *vlan_m = NULL;
- int16_t rw_act_num = 0;
+ uint32_t rw_act_num = 0;
uint64_t is_root;
const struct mlx5_flow_tunnel *tunnel;
struct flow_grp_info grp_info = {
error);
if (ret < 0)
return ret;
+ geneve_item = items;
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
+ ret = mlx5_flow_validate_item_geneve_opt(items,
+ last_item,
+ geneve_item,
+ dev,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
+ break;
case RTE_FLOW_ITEM_TYPE_MPLS:
ret = mlx5_flow_validate_item_mpls(dev, items,
item_flags,
error);
if (ret < 0)
return ret;
+ gtp_item = items;
last_item = MLX5_FLOW_LAYER_GTP;
break;
+ case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+ ret = flow_dv_validate_item_gtp_psc(items, last_item,
+ gtp_item, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GTP_PSC;
+ break;
case RTE_FLOW_ITEM_TYPE_ECPRI:
/* Capacity will be checked in the translate stage. */
ret = mlx5_flow_validate_item_ecpri(items, item_flags,
++actions_n;
action_flags |= MLX5_FLOW_ACTION_FLAG |
MLX5_FLOW_ACTION_MARK_EXT;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
} else {
action_flags |= MLX5_FLOW_ACTION_FLAG;
++actions_n;
++actions_n;
action_flags |= MLX5_FLOW_ACTION_MARK |
MLX5_FLOW_ACTION_MARK_EXT;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
} else {
action_flags |= MLX5_FLOW_ACTION_MARK;
++actions_n;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_SET_META;
rw_act_num += MLX5_ACT_NUM_SET_META;
break;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
error);
if (ret < 0)
return ret;
+ if (rss && sample_rss &&
+ (sample_rss->level != rss->level ||
+ sample_rss->types != rss->types))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Can't use the different RSS types "
+ "or level in the same flow");
if (rss != NULL && rss->queue_num)
queue_index = rss->queue[0];
action_flags |= MLX5_FLOW_ACTION_RSS;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = flow_dv_validate_action_count(dev, error);
+ ret = flow_dv_validate_action_count(dev, actions,
+ action_flags,
+ error);
if (ret < 0)
return ret;
+ count = actions->conf;
action_flags |= MLX5_FLOW_ACTION_COUNT;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
ret = flow_dv_validate_action_decap(dev, action_flags,
+ actions, item_flags,
attr, error);
if (ret < 0)
return ret;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
ret = flow_dv_validate_action_raw_encap_decap
(dev, NULL, actions->conf, attr, &action_flags,
- &actions_n, error);
+ &actions_n, actions, item_flags, error);
if (ret < 0)
return ret;
break;
(dev,
decap ? decap : &empty_decap, encap,
attr, &action_flags, &actions_n,
- error);
+ actions, item_flags, error);
if (ret < 0)
return ret;
break;
RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
MLX5_FLOW_ACTION_SET_MAC_SRC :
MLX5_FLOW_ACTION_SET_MAC_DST;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
/*
* Even if the source and destination MAC addresses have
* overlap in the header with 4B alignment, the convert
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
MLX5_FLOW_ACTION_SET_IPV4_SRC :
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
MLX5_FLOW_ACTION_SET_IPV6_SRC :
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
MLX5_FLOW_ACTION_SET_TP_SRC :
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_TTL ?
MLX5_FLOW_ACTION_SET_TTL :
error);
if (ret)
return ret;
+ if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
+ fdb_mirror_limit)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "sample and jump action combination is not supported");
++actions_n;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
MLX5_FLOW_ACTION_INC_TCP_SEQ :
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
MLX5_FLOW_ACTION_INC_TCP_ACK :
error);
if (ret < 0)
return ret;
+ /*
+ * Validate the regular AGE action (using counter)
+ * mutual exclusion with shared counter actions.
+ */
+ if (!priv->sh->flow_hit_aso_en) {
+ if (count && count->shared)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "old age and shared count combination is not supported");
+ if (sample_count)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "old age action and count must be in the same sub flow");
+ }
action_flags |= MLX5_FLOW_ACTION_AGE;
++actions_n;
break;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
rw_act_num += MLX5_ACT_NUM_SET_DSCP;
break;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
rw_act_num += MLX5_ACT_NUM_SET_DSCP;
break;
case RTE_FLOW_ACTION_TYPE_SAMPLE:
- ret = flow_dv_validate_action_sample(action_flags,
+ ret = flow_dv_validate_action_sample(&action_flags,
actions, dev,
- attr, error);
+ attr, item_flags,
+ rss, &sample_rss,
+ &sample_count,
+ &fdb_mirror_limit,
+ error);
if (ret < 0)
return ret;
action_flags |= MLX5_FLOW_ACTION_SAMPLE;
action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ ret = flow_dv_validate_action_modify_field(dev,
+ action_flags,
+ actions,
+ attr,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+ rw_act_num += ret;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
* Validate the drop action mutual exclusion with other actions.
* Drop action is mutually-exclusive with any other action, except for
* Count action.
+ * Drop action compatibility with tunnel offload was already validated.
*/
- if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
+ if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
+ MLX5_FLOW_ACTION_TUNNEL_SET));
+ else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
(action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
mlx5_flow_ext_mreg_supported(dev))
rw_act_num += MLX5_ACT_NUM_SET_TAG;
- if ((uint32_t)rw_act_num >
+ if (rw_act_num >
flow_dv_modify_hdr_action_max(dev, is_root)) {
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "too many header modify"
" actions to support");
}
+ /* E-Switch egress mirror and modify flow has a limitation on CX5. */
+ if (fdb_mirror_limit && modify_after_mirror)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "sample before modify action is not supported");
return 0;
}
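/*
 * Editorial note: fdb_mirror_limit is set above when the device lacks the
 * reg_c_preserve capability (e.g. ConnectX-5) and the port is a
 * representor; in that case a transfer rule that places any modify-header
 * action after SAMPLE in its action list is rejected by the check above.
 */
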
MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
}
+/**
+ * Create GENEVE TLV option resource.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
+ sh->geneve_tlv_option_resource;
+ struct mlx5_devx_obj *obj;
+ const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
+ int ret = 0;
+
+ if (!geneve_opt_v)
+ return -1;
+ rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
+ if (geneve_opt_resource != NULL) {
+ if (geneve_opt_resource->option_class ==
+ geneve_opt_v->option_class &&
+ geneve_opt_resource->option_type ==
+ geneve_opt_v->option_type &&
+ geneve_opt_resource->length ==
+ geneve_opt_v->option_len) {
+ /* We already have the GENEVE TLV option obj allocated. */
+ __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
+ } else {
+ ret = rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Only one GENEVE TLV option supported");
+ goto exit;
+ }
+ } else {
+ /* Create a GENEVE TLV object and resource. */
+ obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
+ geneve_opt_v->option_class,
+ geneve_opt_v->option_type,
+ geneve_opt_v->option_len);
+ if (!obj) {
+ ret = rte_flow_error_set(error, ENODATA,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to create GENEVE TLV Devx object");
+ goto exit;
+ }
+ sh->geneve_tlv_option_resource =
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*geneve_opt_resource),
+ 0, SOCKET_ID_ANY);
+ if (!sh->geneve_tlv_option_resource) {
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ ret = rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "GENEVE TLV object memory allocation failed");
+ goto exit;
+ }
+ geneve_opt_resource = sh->geneve_tlv_option_resource;
+ geneve_opt_resource->obj = obj;
+ geneve_opt_resource->option_class = geneve_opt_v->option_class;
+ geneve_opt_resource->option_type = geneve_opt_v->option_type;
+ geneve_opt_resource->length = geneve_opt_v->option_len;
+ __atomic_store_n(&geneve_opt_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
+ }
+exit:
+ rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+ return ret;
+}
+
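/*
 * Usage sketch (hypothetical application code, shown for illustration):
 * the registration above is triggered by a GENEVE option item in the
 * pattern, e.g.:
 *
 *	uint32_t opt_data = RTE_BE32(0x11223344);
 *	struct rte_flow_item_geneve_opt opt_spec = {
 *		.option_class = RTE_BE16(0x0102),
 *		.option_type = 0x33,
 *		.option_len = 1,
 *		.data = &opt_data,
 *	};
 *
 * where option_len is in 4-byte words. Only one class/type/length
 * combination can be registered per device at a time; a second flow with
 * the same combination takes a reference on the shared resource.
 */
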
+/**
+ * Add Geneve TLV option item to matcher.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[out] error
+ * Pointer to error structure.
+ */
+static int
+flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
+ void *key, const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
+ const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_3);
+ void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ rte_be32_t opt_data_key = 0, opt_data_mask = 0;
+ int ret = 0;
+
+ if (!geneve_opt_v)
+ return -1;
+ if (!geneve_opt_m)
+ geneve_opt_m = &rte_flow_item_geneve_opt_mask;
+ ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
+ error);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
+ return ret;
+ }
+ /*
+ * The GENEVE TLV option length is expressed by the option length field
+ * in the GENEVE header, in 4-byte words including the option header.
+ * If matching on the option length was not explicitly requested but a
+ * GENEVE TLV option item is present, set the option length implicitly.
+ */
+ if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
+ MLX5_GENEVE_OPTLEN_MASK);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
+ geneve_opt_v->option_len + 1);
+ }
+ /* Set the data. */
+ if (geneve_opt_v->data) {
+ memcpy(&opt_data_key, geneve_opt_v->data,
+ RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
+ sizeof(opt_data_key)));
+ MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
+ sizeof(opt_data_key));
+ memcpy(&opt_data_mask, geneve_opt_m->data,
+ RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
+ sizeof(opt_data_mask)));
+ MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
+ sizeof(opt_data_mask));
+ MLX5_SET(fte_match_set_misc3, misc3_m,
+ geneve_tlv_option_0_data,
+ rte_be_to_cpu_32(opt_data_mask));
+ MLX5_SET(fte_match_set_misc3, misc3_v,
+ geneve_tlv_option_0_data,
+ rte_be_to_cpu_32(opt_data_key & opt_data_mask));
+ }
+ return ret;
+}
+
/**
* Add MPLS item to matcher and to the value.
*
priv->pf_bond < 0 && attr->transfer)
flow_dv_translate_item_source_vport
(matcher, key, priv->vport_id, mask);
- else
- flow_dv_translate_item_meta_vport
- (matcher, key,
- priv->vport_meta_tag,
- priv->vport_meta_mask);
+ /*
+ * We should always set the vport metadata register;
+ * otherwise the SW steering library can drop
+ * the rule if the wire vport metadata value is not
+ * zero, which depends on the kernel configuration.
+ */
+ flow_dv_translate_item_meta_vport(matcher, key,
+ priv->vport_meta_tag,
+ priv->vport_meta_mask);
} else {
flow_dv_translate_item_source_vport(matcher, key,
priv->vport_id, mask);
rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}
+/**
+ * Add GTP PSC item to matcher.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static int
+flow_dv_translate_item_gtp_psc(void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
+ const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
+ void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_3);
+ void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ union {
+ uint32_t w32;
+ struct {
+ uint16_t seq_num;
+ uint8_t npdu_num;
+ uint8_t next_ext_header_type;
+ };
+ } dw_2;
+ uint8_t gtp_flags;
+
+ /* Always set E-flag match on one, regardless of GTP item settings. */
+ gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
+ gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
+ gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
+ gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
+ /* Set the next extension header type. */
+ dw_2.seq_num = 0;
+ dw_2.npdu_num = 0;
+ dw_2.next_ext_header_type = 0xff;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
+ rte_cpu_to_be_32(dw_2.w32));
+ dw_2.seq_num = 0;
+ dw_2.npdu_num = 0;
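+ /* 0x85 is the GTP-U extension header type for the PDU Session Container. */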
+ dw_2.next_ext_header_type = 0x85;
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
+ rte_cpu_to_be_32(dw_2.w32));
+ if (gtp_psc_v) {
+ union {
+ uint32_t w32;
+ struct {
+ uint8_t len;
+ uint8_t type_flags;
+ uint8_t qfi;
+ uint8_t reserved;
+ };
+ } dw_0;
+
+ /* Set the extension header PDU type and QoS. */
+ if (!gtp_psc_m)
+ gtp_psc_m = &rte_flow_item_gtp_psc_mask;
+ dw_0.w32 = 0;
+ dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
+ dw_0.qfi = gtp_psc_m->qfi;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
+ rte_cpu_to_be_32(dw_0.w32));
+ dw_0.w32 = 0;
+ dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
+ gtp_psc_m->pdu_type);
+ dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
+ rte_cpu_to_be_32(dw_0.w32));
+ }
+ return 0;
+}
+
/**
* Add eCPRI item to matcher and to the value.
*
flow_dv_tag_release(dev, act_res->rix_tag);
act_res->rix_tag = 0;
}
- if (act_res->cnt) {
- flow_dv_counter_free(dev, act_res->cnt);
- act_res->cnt = 0;
+ if (act_res->rix_jump) {
+ flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
+ act_res->rix_jump = 0;
}
}
"for sample");
goto error;
}
- int ret;
-
cache_resource->normal_path_tbl = tbl;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
- ret = mlx5_flow_os_create_flow_action_default_miss
- (&cache_resource->default_miss);
- if (!ret) {
+ if (!sh->default_miss_action) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
- "cannot create default miss "
- "action");
+ "default miss action was not "
+ "created");
goto error;
}
sample_dv_actions[resource->sample_act.actions_num++] =
- cache_resource->default_miss;
+ sh->default_miss_action;
}
/* Create a DR sample action */
sampler_attr.sample_ratio = cache_resource->ratio;
sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
&sample_dv_actions[0];
sampler_attr.action = cache_resource->set_action;
- cache_resource->verbs_action =
- mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
- if (!cache_resource->verbs_action) {
+ if (mlx5_os_flow_dr_create_flow_action_sampler
+ (&sampler_attr, &cache_resource->verbs_action)) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create sample action");
cache_resource->dev = dev;
return &cache_resource->entry;
error:
- if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
- cache_resource->default_miss)
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->default_miss));
- else
+ if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
flow_dv_sample_sub_actions_release(dev,
&cache_resource->sample_idx);
if (cache_resource->normal_path_tbl)
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0, res_idx = 0;
struct rte_flow_error *error = ctx->error;
+ uint64_t action_flags;
+ int ret;
/* Register new destination array resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
}
dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
sample_act = &resource->sample_act[idx];
- if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
+ action_flags = sample_act->action_flags;
+ switch (action_flags) {
+ case MLX5_FLOW_ACTION_QUEUE:
dest_attr[idx]->dest = sample_act->dr_queue_action;
- } else if (sample_act->action_flags ==
- (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
+ break;
+ case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
dest_attr[idx]->dest_reformat = &dest_reformat[idx];
dest_attr[idx]->dest_reformat->reformat =
sample_act->dr_encap_action;
dest_attr[idx]->dest_reformat->dest =
sample_act->dr_port_id_action;
- } else if (sample_act->action_flags ==
- MLX5_FLOW_ACTION_PORT_ID) {
+ break;
+ case MLX5_FLOW_ACTION_PORT_ID:
dest_attr[idx]->dest = sample_act->dr_port_id_action;
+ break;
+ case MLX5_FLOW_ACTION_JUMP:
+ dest_attr[idx]->dest = sample_act->dr_jump_action;
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported actions type");
+ goto error;
}
}
/* create a dest array action */
- cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
+ ret = mlx5_os_flow_dr_create_flow_action_dest_array
(domain,
cache_resource->num_of_dest,
- dest_attr);
- if (!cache_resource->action) {
+ dest_attr,
+ &cache_resource->action);
+ if (ret) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
!flow_dv_port_id_action_resource_release(dev,
act_res->rix_port_id_action))
act_res->rix_port_id_action = 0;
+ if (act_res->rix_jump &&
+ !flow_dv_jump_tbl_resource_release(dev,
+ act_res->rix_jump))
+ act_res->rix_jump = 0;
if (dest_attr[idx])
mlx5_free(dest_attr[idx]);
}
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in] action
- * Pointer to action structure.
+ * Pointer to sample action structure.
* @param[in, out] dev_flow
* Pointer to the mlx5_flow.
* @param[in] attr
*/
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
+ const struct rte_flow_action_sample *action,
struct mlx5_flow *dev_flow,
const struct rte_flow_attr *attr,
uint32_t *num_of_dest,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const struct rte_flow_action_sample *sample_action;
const struct rte_flow_action *sub_actions;
- const struct rte_flow_action_queue *queue;
struct mlx5_flow_sub_actions_list *sample_act;
struct mlx5_flow_sub_actions_idx *sample_idx;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct rte_flow *flow = dev_flow->flow;
struct mlx5_flow_rss_desc *rss_desc;
uint64_t action_flags = 0;
rss_desc = &wks->rss_desc;
sample_act = &res->sample_act;
sample_idx = &res->sample_idx;
- sample_action = (const struct rte_flow_action_sample *)action->conf;
- res->ratio = sample_action->ratio;
- sub_actions = sample_action->actions;
+ res->ratio = action->ratio;
+ sub_actions = action->actions;
for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
int type = sub_actions->type;
uint32_t pre_rix = 0;
switch (type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
{
+ const struct rte_flow_action_queue *queue;
struct mlx5_hrxq *hrxq;
uint32_t hrxq_idx;
MLX5_FLOW_FATE_QUEUE;
break;
}
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ {
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+ const struct rte_flow_action_rss *rss;
+ const uint8_t *rss_key;
+
+ rss = sub_actions->conf;
+ memcpy(rss_desc->queue, rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ rss_desc->queue_num = rss->queue_num;
+ /* NULL RSS key indicates default RSS key. */
+ rss_key = !rss->key ? rss_hash_default_key : rss->key;
+ memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ /*
+ * rss->level and rss->types should be set in advance
+ * when expanding items for RSS.
+ */
+ flow_dv_hashfields_set(dev_flow, rss_desc);
+ hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create fate queue");
+ sample_act->dr_queue_action = hrxq->action;
+ sample_idx->rix_hrxq = hrxq_idx;
+ sample_actions[sample_act->actions_num++] =
+ hrxq->action;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_QUEUE;
+ break;
+ }
case RTE_FLOW_ACTION_TYPE_MARK:
{
uint32_t tag_be = mlx5_flow_mark_set
}
case RTE_FLOW_ACTION_TYPE_COUNT:
{
- uint32_t counter;
-
- counter = flow_dv_translate_create_counter(dev,
- dev_flow, sub_actions->conf, 0);
- if (!counter)
- return rte_flow_error_set
+ if (!flow->counter) {
+ flow->counter =
+ flow_dv_translate_create_counter(dev,
+ dev_flow, sub_actions->conf,
+ 0);
+ if (!flow->counter)
+ return rte_flow_error_set
(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "cannot create counter"
- " object.");
- sample_idx->cnt = counter;
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create counter"
+ " object.");
+ }
sample_act->dr_cnt_action =
(flow_dv_counter_get_by_idx(dev,
- counter, NULL))->action;
+ flow->counter, NULL))->action;
sample_actions[sample_act->actions_num++] =
sample_act->dr_cnt_action;
action_flags |= MLX5_FLOW_ACTION_COUNT;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
}
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
/* Save the encap resource before the sample action. */
pre_rix = dev_flow->handle->dvh.rix_encap_decap;
sample_act->dr_port_id_action =
dev_flow->dv.port_id_action->action;
}
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_jump =
+ dev_flow->handle->rix_jump;
+ sample_act->dr_jump_action =
+ dev_flow->dv.jump->action;
+ dev_flow->handle->rix_jump = 0;
+ }
sample_act->actions_num = normal_idx;
/* Update the sample action resource into the first index of the array. */
mdest_res->ft_type = res->ft_type;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint64_t action_flags = 0;
- uint64_t priority = attr->priority;
struct mlx5_flow_dv_matcher matcher = {
.mask = {
.size = sizeof(matcher.mask.buf) -
struct mlx5_flow_dv_dest_array_resource mdest_res;
struct mlx5_flow_dv_sample_resource sample_res;
void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+ const struct rte_flow_action_sample *sample = NULL;
struct mlx5_flow_sub_actions_list *sample_act;
uint32_t sample_act_pos = UINT32_MAX;
uint32_t num_of_dest = 0;
.external = !!dev_flow->external,
.transfer = !!attr->transfer,
.fdb_def_rule = !!priv->fdb_def_rule,
- .skip_scale = !!dev_flow->skip_scale,
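+ /* Only the flow-group bit of skip_scale applies here;
+ * the jump-group bit is handled at JUMP translation.
+ */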
+ .skip_scale = dev_flow->skip_scale &
+ (1 << MLX5_SCALE_FLOW_GROUP_BIT),
};
if (!wks)
dev_flow->dv.group = table;
if (attr->transfer)
mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
- if (priority == MLX5_FLOW_PRIO_RSVD)
- priority = dev_conf->flow_prio - 1;
/* The number of actions must be set to 0 in case of a dirty stack. */
mhdr_res->actions_num = 0;
if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
break;
case RTE_FLOW_ACTION_TYPE_AGE:
if (priv->sh->flow_hit_aso_en && attr->group) {
- flow->age = flow_dv_translate_create_aso_age
- (dev, action->conf, error);
- if (!flow->age)
- return rte_flow_error_set
+ /*
+ * Create one shared age action, to be used
+ * by all sub-flows.
+ */
+ if (!flow->age) {
+ flow->age =
+ flow_dv_translate_create_aso_age
+ (dev, action->conf,
+ error);
+ if (!flow->age)
+ return rte_flow_error_set
(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"can't create ASO age action");
+ }
dev_flow->dv.actions[actions_n++] =
(flow_aso_age_get_by_idx
(dev, flow->age))->dr_action;
jump_group = ((const struct rte_flow_action_jump *)
action->conf)->group;
grp_info.std_tbl_fix = 0;
- grp_info.skip_scale = 0;
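+ /* Apply the jump-group bit of the skip_scale bitmask
+ * when resolving the jump target table.
+ */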
+ if (dev_flow->skip_scale &
+ (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
+ grp_info.skip_scale = 1;
+ else
+ grp_info.skip_scale = 0;
ret = mlx5_flow_group_to_table(dev, tunnel,
jump_group,
&table,
dev_flow->dv.jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
+ num_of_dest++;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
break;
case RTE_FLOW_ACTION_TYPE_SAMPLE:
sample_act_pos = actions_n;
- ret = flow_dv_translate_action_sample(dev,
- actions,
- dev_flow, attr,
- &num_of_dest,
- sample_actions,
- &sample_res,
- error);
- if (ret < 0)
- return ret;
+ sample = (const struct rte_flow_action_sample *)
+ action->conf;
actions_n++;
action_flags |= MLX5_FLOW_ACTION_SAMPLE;
/* put encap action into group if work with port id */
sample_act->action_flags |=
MLX5_FLOW_ACTION_ENCAP;
break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ if (flow_dv_convert_action_modify_field
+ (dev, mhdr_res, actions, attr, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (mhdr_res->actions_num) {
handle->dvh.modify_hdr->action;
}
if (action_flags & MLX5_FLOW_ACTION_COUNT) {
- flow->counter =
- flow_dv_translate_create_counter(dev,
- dev_flow, count, age);
-
- if (!flow->counter)
- return rte_flow_error_set
+ /*
+ * Create one count action, to be used
+ * by all sub-flows.
+ */
+ if (!flow->counter) {
+ flow->counter =
+ flow_dv_translate_create_counter
+ (dev, dev_flow, count,
+ age);
+ if (!flow->counter)
+ return rte_flow_error_set
(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "cannot create counter"
- " object.");
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "cannot create counter"
+ " object.");
+ }
dev_flow->dv.actions[actions_n] =
(flow_dv_counter_get_by_idx(dev,
flow->counter, NULL))->action;
actions_n++;
}
- if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
- ret = flow_dv_create_action_sample(dev,
- dev_flow,
- num_of_dest,
- &sample_res,
- &mdest_res,
- sample_actions,
- action_flags,
- error);
- if (ret < 0)
- return rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "cannot create sample action");
- if (num_of_dest > 1) {
- dev_flow->dv.actions[sample_act_pos] =
- dev_flow->dv.dest_array_res->action;
- } else {
- dev_flow->dv.actions[sample_act_pos] =
- dev_flow->dv.sample_res->verbs_action;
- }
- }
- break;
default:
break;
}
modify_action_position == UINT32_MAX)
modify_action_position = actions_n++;
}
- /*
- * For multiple destination (sample action with ratio=1), the encap
- * action and port id action will be combined into group action.
- * So need remove the original these actions in the flow and only
- * use the sample action instead of.
- */
- if (num_of_dest > 1 && sample_act->dr_port_id_action) {
- int i;
- void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
-
- for (i = 0; i < actions_n; i++) {
- if ((sample_act->dr_encap_action &&
- sample_act->dr_encap_action ==
- dev_flow->dv.actions[i]) ||
- (sample_act->dr_port_id_action &&
- sample_act->dr_port_id_action ==
- dev_flow->dv.actions[i]))
- continue;
- temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
- }
- memcpy((void *)dev_flow->dv.actions,
- (void *)temp_actions,
- tmp_actions_n * sizeof(void *));
- actions_n = tmp_actions_n;
- }
- dev_flow->dv.actions_n = actions_n;
- dev_flow->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
+ ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
+ match_value,
+ items, error);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "cannot create GENEVE TLV option");
+ flow->geneve_tlv_option = 1;
+ last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
+ break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GTP;
break;
+ case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+ ret = flow_dv_translate_item_gtp_psc(match_mask,
+ match_value,
+ items);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "cannot create GTP PSC item");
+ last_item = MLX5_FLOW_LAYER_GTP_PSC;
+ break;
case RTE_FLOW_ITEM_TYPE_ECPRI:
if (!mlx5_flex_parser_ecpri_exist(dev)) {
/* Create it only the first time it is used. */
handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow, rss_desc);
+ /* If the sample action contains an RSS action, the sample/mirror
+ * resource should be registered after the hash fields are updated.
+ */
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
+ ret = flow_dv_translate_action_sample(dev,
+ sample,
+ dev_flow, attr,
+ &num_of_dest,
+ sample_actions,
+ &sample_res,
+ error);
+ if (ret < 0)
+ return ret;
+ ret = flow_dv_create_action_sample(dev,
+ dev_flow,
+ num_of_dest,
+ &sample_res,
+ &mdest_res,
+ sample_actions,
+ action_flags,
+ error);
+ if (ret < 0)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create sample action");
+ if (num_of_dest > 1) {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.dest_array_res->action;
+ } else {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.sample_res->verbs_action;
+ }
+ }
+ /*
+ * For multiple destinations (sample action with ratio=1), the encap,
+ * port ID and jump actions are combined into a group action, so the
+ * original actions must be removed from the flow and only the sample
+ * action used instead.
+ */
+ if (num_of_dest > 1 &&
+ (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
+ int i;
+ void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+
+ for (i = 0; i < actions_n; i++) {
+ if ((sample_act->dr_encap_action &&
+ sample_act->dr_encap_action ==
+ dev_flow->dv.actions[i]) ||
+ (sample_act->dr_port_id_action &&
+ sample_act->dr_port_id_action ==
+ dev_flow->dv.actions[i]) ||
+ (sample_act->dr_jump_action &&
+ sample_act->dr_jump_action ==
+ dev_flow->dv.actions[i]))
+ continue;
+ temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
+ }
+ memcpy((void *)dev_flow->dv.actions,
+ (void *)temp_actions,
+ tmp_actions_n * sizeof(void *));
+ actions_n = tmp_actions_n;
+ }
+ dev_flow->dv.actions_n = actions_n;
+ dev_flow->act_flags = action_flags;
/* Register matcher. */
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);
- matcher.priority = mlx5_os_flow_adjust_priority(dev,
- priority,
- matcher.priority);
+ matcher.priority = mlx5_get_matcher_priority(dev, attr,
+ matcher.priority);
/* The reserved field does not need to be set to 0 here. */
tbl_key.domain = attr->transfer;
tbl_key.direction = attr->egress;
static int
__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
const uint64_t hash_fields,
- const int tunnel,
uint32_t hrxq_idx)
{
- uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+ uint32_t *hrxqs = action->hrxq;
switch (hash_fields & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_SRC_ONLY:
hrxqs[0] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV4_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
hrxqs[1] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV4_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
hrxqs[2] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV6:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_SRC_ONLY:
hrxqs[3] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV6_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
hrxqs[4] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV6_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
hrxqs[5] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_NONE:
*/
static uint32_t
__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
- const uint64_t hash_fields,
- const int tunnel)
+ const uint64_t hash_fields)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss =
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
- const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
- shared_rss->hrxq_tunnel;
+ const uint32_t *hrxqs = shared_rss->hrxq;
switch (hash_fields & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_SRC_ONLY:
return hrxqs[0];
case MLX5_RSS_HASH_IPV4_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
return hrxqs[1];
case MLX5_RSS_HASH_IPV4_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
return hrxqs[2];
case MLX5_RSS_HASH_IPV6:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_SRC_ONLY:
return hrxqs[3];
case MLX5_RSS_HASH_IPV6_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
return hrxqs[4];
case MLX5_RSS_HASH_IPV6_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
return hrxqs[5];
case MLX5_RSS_HASH_NONE:
return hrxqs[6];
default:
return 0;
}
-}
-
-/**
- * Retrieves hash RX queue suitable for the *flow*.
- * If shared action configured for *flow* suitable hash RX queue will be
- * retrieved from attached shared action.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- * Pointer to the sub flow.
- * @param[in] rss_desc
- * Pointer to the RSS descriptor.
- * @param[out] hrxq
- * Pointer to retrieved hash RX queue object.
- *
- * @return
- * Valid hash RX queue index, otherwise 0 and rte_errno is set.
- */
-static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
- struct mlx5_flow_rss_desc *rss_desc,
- struct mlx5_hrxq **hrxq)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t hrxq_idx;
- if (rss_desc->shared_rss) {
- hrxq_idx = __flow_dv_action_rss_hrxq_lookup
- (dev, rss_desc->shared_rss,
- dev_flow->hash_fields,
- !!(dev_flow->handle->layers &
- MLX5_FLOW_LAYER_TUNNEL));
- if (hrxq_idx)
- *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
- hrxq_idx);
- } else {
- *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
- &hrxq_idx);
- }
- return hrxq_idx;
}
/**
struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
MLX5_ASSERT(wks);
- if (rss_desc->shared_rss) {
- dh = wks->flows[wks->flow_idx - 1].handle;
- MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
- dh->rix_srss = rss_desc->shared_rss;
- }
for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
dev_flow = &wks->flows[idx];
dv = &dev_flow->dv;
n = dv->actions_n;
if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
if (dv->transfer) {
- dv->actions[n++] = priv->sh->esw_drop_action;
+ MLX5_ASSERT(priv->sh->dr_drop_action);
+ dv->actions[n++] = priv->sh->dr_drop_action;
} else {
+#ifdef HAVE_MLX5DV_DR
+ /* DR supports drop action placeholder. */
+ MLX5_ASSERT(priv->sh->dr_drop_action);
+ dv->actions[n++] = priv->sh->dr_drop_action;
+#else
+ /* For DV we use the explicit drop queue. */
MLX5_ASSERT(priv->drop_queue.hrxq);
dv->actions[n++] =
priv->drop_queue.hrxq->action;
+#endif
}
} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
- !dv_h->rix_sample && !dv_h->rix_dest_array) ||
- (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
+ !dv_h->rix_sample && !dv_h->rix_dest_array)) {
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+ &hrxq_idx);
+ if (!hrxq) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error;
+ }
+ dh->rix_hrxq = hrxq_idx;
+ dv->actions[n++] = hrxq->action;
+ } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
struct mlx5_hrxq *hrxq = NULL;
- uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
- (dev, dev_flow, rss_desc, &hrxq);
+ uint32_t hrxq_idx;
+
+ hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
+ rss_desc->shared_rss,
+ dev_flow->hash_fields);
+ if (hrxq_idx)
+ hrxq = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ hrxq_idx);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"cannot get hash queue");
goto error;
}
- if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
- dh->rix_hrxq = hrxq_idx;
+ dh->rix_srss = rss_desc->shared_rss;
dv->actions[n++] = hrxq->action;
} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
if (!priv->sh->default_miss_action) {
if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
mlx5_hrxq_release(dev, dh->rix_hrxq);
dh->rix_hrxq = 0;
+ } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+ dh->rix_srss = 0;
}
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
- if (rss_desc->shared_rss)
- wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
*
* @param dev
* Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param port_id
+ * Index to port ID action resource.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*
* @param dev
* Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param rix_jump
+ * Index to the jump action resource.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+ uint32_t rix_jump)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tbl_data_entry *tbl_data;
tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
- handle->rix_jump);
+ rix_jump);
if (!tbl_data)
return 0;
return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
mlx5_hrxq_release(dev, handle->rix_hrxq);
break;
case MLX5_FLOW_FATE_JUMP:
- flow_dv_jump_tbl_resource_release(dev, handle);
+ flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
break;
case MLX5_FLOW_FATE_PORT_ID:
flow_dv_port_id_action_resource_release(dev,
handle->rix_port_id_action);
break;
- case MLX5_FLOW_FATE_SHARED_RSS:
- flow_dv_shared_rss_action_release(dev, handle->rix_srss);
- break;
default:
DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
break;
if (cache_resource->verbs_action)
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->verbs_action));
- if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
- if (cache_resource->default_miss)
- claim_zero(mlx5_flow_os_destroy_flow_action
- (cache_resource->default_miss));
- }
if (cache_resource->normal_path_tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev),
cache_resource->normal_path_tbl);
&cache->entry);
}
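+/**
+ * Release the GENEVE TLV option resource, destroying its DevX object
+ * when the last reference is dropped.
+ *
+ * @param dev
+ * Pointer to the Ethernet device.
+ */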
+static void
+flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
+ sh->geneve_tlv_option_resource;
+ rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
+ if (geneve_opt_resource) {
+ if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
+ __ATOMIC_RELAXED))) {
+ claim_zero(mlx5_devx_cmd_destroy
+ (geneve_opt_resource->obj));
+ mlx5_free(sh->geneve_tlv_option_resource);
+ sh->geneve_tlv_option_resource = NULL;
+ }
+ }
+ rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+}
+
/**
* Remove the flow from the NIC but keeps it in memory.
* Lock free, (mutex should be acquired by caller).
{
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t srss = 0;
if (!flow)
return;
}
if (flow->age)
flow_dv_aso_age_release(dev, flow->age);
+ if (flow->geneve_tlv_option) {
+ flow_dv_geneve_tlv_option_resource_release(dev);
+ flow->geneve_tlv_option = 0;
+ }
while (flow->dev_handles) {
uint32_t tmp_idx = flow->dev_handles;
if (dev_handle->dvh.rix_tag)
flow_dv_tag_release(dev,
dev_handle->dvh.rix_tag);
- flow_dv_fate_resource_release(dev, dev_handle);
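+ /* The shared RSS release is deferred until all
+ * handles are freed; remember its index from the
+ * first handle that carries it.
+ */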
+ if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
+ flow_dv_fate_resource_release(dev, dev_handle);
+ else if (!srss)
+ srss = dev_handle->rix_srss;
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
tmp_idx);
}
+ if (srss)
+ flow_dv_shared_rss_action_release(dev, srss);
}
/**
*/
static int
__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
- struct mlx5_shared_action_rss *action)
+ struct mlx5_shared_action_rss *shared_rss)
{
- return __flow_dv_hrxqs_release(dev, &action->hrxq) +
- __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
+ return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
+}
+
+/**
+ * Adjust the L3/L4 hash value of a pre-created shared RSS hrxq
+ * according to the user input.
+ *
+ * Only one hash value is available for each L3+L4 combination.
+ * For example, MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY and
+ * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can
+ * share the same slot in mlx5_rss_hash_fields.
+ *
+ * @param[in] rss
+ * Pointer to the shared action RSS conf.
+ * @param[in, out] hash_field
+ * The hash_field variable to be adjusted.
+ *
+ * @return
+ * void
+ */
+static void
+__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
+ uint64_t *hash_field)
+{
+ uint64_t rss_types = rss->origin.types;
+
+ switch (*hash_field & ~IBV_RX_HASH_INNER) {
+ case MLX5_RSS_HASH_IPV4:
+ if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+ *hash_field &= ~MLX5_RSS_HASH_IPV4;
+ if (rss_types & ETH_RSS_L3_DST_ONLY)
+ *hash_field |= IBV_RX_HASH_DST_IPV4;
+ else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ *hash_field |= IBV_RX_HASH_SRC_IPV4;
+ else
+ *hash_field |= MLX5_RSS_HASH_IPV4;
+ }
+ return;
+ case MLX5_RSS_HASH_IPV6:
+ if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+ *hash_field &= ~MLX5_RSS_HASH_IPV6;
+ if (rss_types & ETH_RSS_L3_DST_ONLY)
+ *hash_field |= IBV_RX_HASH_DST_IPV6;
+ else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ *hash_field |= IBV_RX_HASH_SRC_IPV6;
+ else
+ *hash_field |= MLX5_RSS_HASH_IPV6;
+ }
+ return;
+ case MLX5_RSS_HASH_IPV4_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP:
+ if (rss_types & ETH_RSS_UDP) {
+ *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
+ if (rss_types & ETH_RSS_L4_DST_ONLY)
+ *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
+ else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
+ else
+ *hash_field |= MLX5_UDP_IBV_RX_HASH;
+ }
+ return;
+ case MLX5_RSS_HASH_IPV4_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP:
+ if (rss_types & ETH_RSS_TCP) {
+ *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
+ if (rss_types & ETH_RSS_L4_DST_ONLY)
+ *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
+ else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
+ else
+ *hash_field |= MLX5_TCP_IBV_RX_HASH;
+ }
+ return;
+ default:
+ return;
+ }
}
/**
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
uint32_t action_idx,
- struct mlx5_shared_action_rss *action,
+ struct mlx5_shared_action_rss *shared_rss,
struct rte_flow_error *error)
{
struct mlx5_flow_rss_desc rss_desc = { 0 };
size_t i;
int err;
- if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) {
+ if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot setup indirection table");
}
- memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
+ memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
- rss_desc.const_q = action->origin.queue;
- rss_desc.queue_num = action->origin.queue_num;
+ rss_desc.const_q = shared_rss->origin.queue;
+ rss_desc.queue_num = shared_rss->origin.queue_num;
/* Set non-zero value to indicate a shared RSS. */
rss_desc.shared_rss = action_idx;
- rss_desc.ind_tbl = action->ind_tbl;
+ rss_desc.ind_tbl = shared_rss->ind_tbl;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
uint32_t hrxq_idx;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
- int tunnel;
+ int tunnel = 0;
- for (tunnel = 0; tunnel < 2; tunnel++) {
- rss_desc.tunnel = tunnel;
- rss_desc.hash_fields = hash_fields;
- hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
- if (!hrxq_idx) {
- rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot get hash queue");
- goto error_hrxq_new;
- }
- err = __flow_dv_action_rss_hrxq_set
- (action, hash_fields, tunnel, hrxq_idx);
- MLX5_ASSERT(!err);
+ __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
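+ /* An RSS level above 1 selects hashing on the inner
+ * (tunneled) headers.
+ */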
+ if (shared_rss->origin.level > 1) {
+ hash_fields |= IBV_RX_HASH_INNER;
+ tunnel = 1;
}
+ rss_desc.tunnel = tunnel;
+ rss_desc.hash_fields = hash_fields;
+ hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
+ if (!hrxq_idx) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error_hrxq_new;
+ }
+ err = __flow_dv_action_rss_hrxq_set
+ (shared_rss, hash_fields, hrxq_idx);
+ MLX5_ASSERT(!err);
}
return 0;
error_hrxq_new:
err = rte_errno;
- __flow_dv_action_rss_hrxqs_release(dev, action);
- if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true))
- action->ind_tbl = NULL;
+ __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+ if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
+ shared_rss->ind_tbl = NULL;
rte_errno = err;
return -rte_errno;
}
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_shared_action_rss *shared_action = NULL;
+ struct mlx5_shared_action_rss *shared_rss = NULL;
void *queue = NULL;
struct rte_flow_action_rss *origin;
const uint8_t *rss_key;
RTE_SET_USED(conf);
queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
0, SOCKET_ID_ANY);
- shared_action = mlx5_ipool_zmalloc
+ shared_rss = mlx5_ipool_zmalloc
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
- if (!shared_action || !queue) {
+ if (!shared_rss || !queue) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
"rss action number out of range");
goto error_rss_init;
}
- shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(*shared_action->ind_tbl),
- 0, SOCKET_ID_ANY);
- if (!shared_action->ind_tbl) {
+ shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*shared_rss->ind_tbl),
+ 0, SOCKET_ID_ANY);
+ if (!shared_rss->ind_tbl) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
goto error_rss_init;
}
memcpy(queue, rss->queue, queue_size);
- shared_action->ind_tbl->queues = queue;
- shared_action->ind_tbl->queues_n = rss->queue_num;
- origin = &shared_action->origin;
+ shared_rss->ind_tbl->queues = queue;
+ shared_rss->ind_tbl->queues_n = rss->queue_num;
+ origin = &shared_rss->origin;
origin->func = rss->func;
origin->level = rss->level;
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
origin->types = !rss->types ? ETH_RSS_IP : rss->types;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
- memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
- origin->key = &shared_action->key[0];
+ memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ origin->key = &shared_rss->key[0];
origin->key_len = MLX5_RSS_HASH_KEY_LEN;
origin->queue = queue;
origin->queue_num = rss->queue_num;
- if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
+ if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
goto error_rss_init;
- rte_spinlock_init(&shared_action->action_rss_sl);
- __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
+ rte_spinlock_init(&shared_rss->action_rss_sl);
+ __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- &priv->rss_shared_actions, idx, shared_action, next);
+ &priv->rss_shared_actions, idx, shared_rss, next);
rte_spinlock_unlock(&priv->shared_act_sl);
return idx;
error_rss_init:
- if (shared_action) {
- if (shared_action->ind_tbl)
- mlx5_free(shared_action->ind_tbl);
+ if (shared_rss) {
+ if (shared_rss->ind_tbl)
+ mlx5_free(shared_rss->ind_tbl);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
}
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss hrxq has references");
+ if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
+ 0, 0, __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED))
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss has references");
queue = shared_rss->ind_tbl->queues;
remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
if (remaining)
NULL,
"shared rss indirection table has"
" references");
- if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
- 0, 0, __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
- return rte_flow_error_set(error, EBUSY,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "shared rss has references");
mlx5_free(queue);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"invalid shared action to update");
+ if (priv->obj_ops.ind_table_modify == NULL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "cannot modify indirection table");
queue = mlx5_malloc(MLX5_MEM_ZERO,
RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
0, SOCKET_ID_ANY);
&actions[0]);
if (ret)
goto err;
- actions[1] = priv->drop_queue.hrxq->action;
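+ /* Prefer the DR drop action when available, otherwise
+ * fall back to the drop queue action.
+ */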
+ actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
+ priv->drop_queue.hrxq->action;
dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
&matcher);
RTE_SET_USED(conf);
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_RSS:
+ /*
+ * priv->obj_ops is set according to the driver capabilities:
+ * when the DevX capabilities are sufficient it is set to
+ * devx_obj_ops, otherwise to ibv_obj_ops. ibv_obj_ops does
+ * not support the ind_table_modify operation, so in that
+ * case the shared RSS action cannot be used.
+ */
+ if (priv->obj_ops.ind_table_modify == NULL)
+ return rte_flow_error_set
+ (err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared RSS action not supported");
return mlx5_validate_action_rss(dev, action, err);
case RTE_FLOW_ACTION_TYPE_AGE:
if (!priv->sh->aso_age_mng)