#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
MLX5_MODIFICATION_TYPE_SET, error);
}
+static int
+mlx5_flow_item_field_width(enum rte_flow_field_id field)
+{
+ switch (field) {
+ case RTE_FLOW_FIELD_START:
+ return 32;
+ case RTE_FLOW_FIELD_MAC_DST:
+ case RTE_FLOW_FIELD_MAC_SRC:
+ return 48;
+ case RTE_FLOW_FIELD_VLAN_TYPE:
+ return 16;
+ case RTE_FLOW_FIELD_VLAN_ID:
+ return 12;
+ case RTE_FLOW_FIELD_MAC_TYPE:
+ return 16;
+ case RTE_FLOW_FIELD_IPV4_DSCP:
+ return 6;
+ case RTE_FLOW_FIELD_IPV4_TTL:
+ return 8;
+ case RTE_FLOW_FIELD_IPV4_SRC:
+ case RTE_FLOW_FIELD_IPV4_DST:
+ return 32;
+ case RTE_FLOW_FIELD_IPV6_DSCP:
+ return 6;
+ case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
+ return 8;
+ case RTE_FLOW_FIELD_IPV6_SRC:
+ case RTE_FLOW_FIELD_IPV6_DST:
+ return 128;
+ case RTE_FLOW_FIELD_TCP_PORT_SRC:
+ case RTE_FLOW_FIELD_TCP_PORT_DST:
+ return 16;
+ case RTE_FLOW_FIELD_TCP_SEQ_NUM:
+ case RTE_FLOW_FIELD_TCP_ACK_NUM:
+ return 32;
+ case RTE_FLOW_FIELD_TCP_FLAGS:
+ return 6;
+ case RTE_FLOW_FIELD_UDP_PORT_SRC:
+ case RTE_FLOW_FIELD_UDP_PORT_DST:
+ return 16;
+ case RTE_FLOW_FIELD_VXLAN_VNI:
+ case RTE_FLOW_FIELD_GENEVE_VNI:
+ return 24;
+ case RTE_FLOW_FIELD_GTP_TEID:
+ case RTE_FLOW_FIELD_TAG:
+ return 32;
+ case RTE_FLOW_FIELD_MARK:
+ return 24;
+ case RTE_FLOW_FIELD_META:
+ return 32;
+ case RTE_FLOW_FIELD_POINTER:
+ case RTE_FLOW_FIELD_VALUE:
+ return 64;
+ default:
+ MLX5_ASSERT(false);
+ }
+ return 0;
+}
+
static void
mlx5_flow_field_id_to_modify_info
(const struct rte_flow_action_modify_data *data,
struct field_modify_info *info,
- uint32_t *mask, uint32_t *value, uint32_t width,
+ uint32_t *mask, uint32_t *value,
+ uint32_t width, uint32_t dst_width,
struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
uint32_t idx = 0;
+ uint64_t val = 0;
switch (data->field) {
case RTE_FLOW_FIELD_START:
/* not supported yet */
}
info[idx] = (struct field_modify_info){2, 4 * idx,
MLX5_MODI_OUT_DMAC_15_0};
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
}
info[idx] = (struct field_modify_info){2, 4 * idx,
MLX5_MODI_OUT_SMAC_15_0};
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_FIRST_VID};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x00000fff >>
- (12 - width));
+ mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
break;
case RTE_FLOW_FIELD_MAC_TYPE:
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_ETHERTYPE};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_IPV4_DSCP:
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_DSCP};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000003f >>
- (6 - width));
+ mask[idx] = 0x3f >> (6 - width);
break;
case RTE_FLOW_FIELD_IPV4_TTL:
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IPV4_TTL};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x000000ff >>
- (8 - width));
+ mask[idx] = 0xff >> (8 - width);
break;
case RTE_FLOW_FIELD_IPV4_SRC:
info[idx] = (struct field_modify_info){4, 0,
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_DSCP};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000003f >>
- (6 - width));
+ mask[idx] = 0x3f >> (6 - width);
break;
case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IPV6_HOPLIMIT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x000000ff >>
- (8 - width));
+ mask[idx] = 0xff >> (8 - width);
break;
case RTE_FLOW_FIELD_IPV6_SRC:
if (mask) {
if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_127_96};
+ info[idx] = (struct field_modify_info){4,
+ 4 * idx,
+ MLX5_MODI_OUT_SIPV6_31_0};
if (width < 32) {
mask[idx] =
rte_cpu_to_be_32(0xffffffff >>
if (data->offset < 64) {
info[idx] = (struct field_modify_info){4,
4 * idx,
- MLX5_MODI_OUT_SIPV6_95_64};
+ MLX5_MODI_OUT_SIPV6_63_32};
if (width < 32) {
mask[idx] =
rte_cpu_to_be_32(0xffffffff >>
}
if (data->offset < 96) {
info[idx] = (struct field_modify_info){4,
- 8 * idx,
- MLX5_MODI_OUT_SIPV6_63_32};
+ 4 * idx,
+ MLX5_MODI_OUT_SIPV6_95_64};
if (width < 32) {
mask[idx] =
rte_cpu_to_be_32(0xffffffff >>
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 12 * idx,
- MLX5_MODI_OUT_SIPV6_31_0};
+ info[idx] = (struct field_modify_info){4, 4 * idx,
+ MLX5_MODI_OUT_SIPV6_127_96};
mask[idx] = rte_cpu_to_be_32(0xffffffff >>
(32 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_127_96};
+ MLX5_MODI_OUT_SIPV6_31_0};
if (data->offset < 64)
info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_95_64};
+ MLX5_MODI_OUT_SIPV6_63_32};
if (data->offset < 96)
info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_63_32};
+ MLX5_MODI_OUT_SIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_31_0};
+ MLX5_MODI_OUT_SIPV6_127_96};
}
break;
case RTE_FLOW_FIELD_IPV6_DST:
if (mask) {
if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_127_96};
+ info[idx] = (struct field_modify_info){4,
+ 4 * idx,
+ MLX5_MODI_OUT_DIPV6_31_0};
if (width < 32) {
mask[idx] =
rte_cpu_to_be_32(0xffffffff >>
if (data->offset < 64) {
info[idx] = (struct field_modify_info){4,
4 * idx,
- MLX5_MODI_OUT_DIPV6_95_64};
+ MLX5_MODI_OUT_DIPV6_63_32};
if (width < 32) {
mask[idx] =
rte_cpu_to_be_32(0xffffffff >>
}
if (data->offset < 96) {
info[idx] = (struct field_modify_info){4,
- 8 * idx,
- MLX5_MODI_OUT_DIPV6_63_32};
+ 4 * idx,
+ MLX5_MODI_OUT_DIPV6_95_64};
if (width < 32) {
mask[idx] =
rte_cpu_to_be_32(0xffffffff >>
break;
++idx;
}
- info[idx] = (struct field_modify_info){4, 12 * idx,
- MLX5_MODI_OUT_DIPV6_31_0};
+ info[idx] = (struct field_modify_info){4, 4 * idx,
+ MLX5_MODI_OUT_DIPV6_127_96};
mask[idx] = rte_cpu_to_be_32(0xffffffff >>
(32 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_127_96};
+ MLX5_MODI_OUT_DIPV6_31_0};
if (data->offset < 64)
info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_95_64};
+ MLX5_MODI_OUT_DIPV6_63_32};
if (data->offset < 96)
info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_63_32};
+ MLX5_MODI_OUT_DIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_31_0};
+ MLX5_MODI_OUT_DIPV6_127_96};
}
break;
case RTE_FLOW_FIELD_TCP_PORT_SRC:
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_SPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_TCP_PORT_DST:
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_DPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_TCP_SEQ_NUM:
info[idx] = (struct field_modify_info){4, 0,
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_TCP_FLAGS};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000003f >>
- (6 - width));
+ mask[idx] = 0x3f >> (6 - width);
break;
case RTE_FLOW_FIELD_UDP_PORT_SRC:
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_UDP_SPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_UDP_PORT_DST:
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_UDP_DPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_VXLAN_VNI:
/* not supported yet */
}
break;
case RTE_FLOW_FIELD_POINTER:
- for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
- if (mask[idx]) {
- memcpy(&value[idx],
- (void *)(uintptr_t)data->value, 32);
- value[idx] = rte_cpu_to_be_32(value[idx]);
- break;
- }
- }
- break;
case RTE_FLOW_FIELD_VALUE:
+ if (data->field == RTE_FLOW_FIELD_POINTER)
+ memcpy(&val, (void *)(uintptr_t)data->value,
+ sizeof(uint64_t));
+ else
+ val = data->value;
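+ /*
+  * Spread the 64-bit immediate over the modify words: each masked
+  * slot takes a 32-, 16- or 8-bit big-endian chunk, depending on
+  * the destination field width.
+  */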
for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
if (mask[idx]) {
- value[idx] =
- rte_cpu_to_be_32((uint32_t)data->value);
- break;
+ if (dst_width > 16) {
+ value[idx] = rte_cpu_to_be_32(val);
+ val >>= 32;
+ } else if (dst_width > 8) {
+ value[idx] = rte_cpu_to_be_16(val);
+ val >>= 16;
+ } else {
+ value[idx] = (uint8_t)val;
+ val >>= 8;
+ }
+ if (!val)
+ break;
}
}
break;
uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
uint32_t type;
+ uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
conf->src.field == RTE_FLOW_FIELD_VALUE) {
type = MLX5_MODIFICATION_TYPE_SET;
/* For SET fill the destination field (field) first. */
mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
- value, conf->width, dev, attr, error);
+ value, conf->width, dst_width, dev, attr, error);
/* Then copy immediate value from source as per mask. */
mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
- value, conf->width, dev, attr, error);
+ value, conf->width, dst_width, dev, attr, error);
item.spec = &value;
} else {
type = MLX5_MODIFICATION_TYPE_COPY;
/* For COPY fill the destination field (dcopy) without mask. */
mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
- value, conf->width, dev, attr, error);
+ value, conf->width, dst_width, dev, attr, error);
/* Then construct the source field (field) with mask. */
mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
- value, conf->width, dev, attr, error);
+ value, conf->width, dst_width, dev, attr, error);
}
item.mask = &mask;
return flow_dv_convert_modify_action(&item,
return ret;
}
-static int
-mlx5_flow_item_field_width(enum rte_flow_field_id field)
-{
- switch (field) {
- case RTE_FLOW_FIELD_START:
- return 32;
- case RTE_FLOW_FIELD_MAC_DST:
- case RTE_FLOW_FIELD_MAC_SRC:
- return 48;
- case RTE_FLOW_FIELD_VLAN_TYPE:
- return 16;
- case RTE_FLOW_FIELD_VLAN_ID:
- return 12;
- case RTE_FLOW_FIELD_MAC_TYPE:
- return 16;
- case RTE_FLOW_FIELD_IPV4_DSCP:
- return 6;
- case RTE_FLOW_FIELD_IPV4_TTL:
- return 8;
- case RTE_FLOW_FIELD_IPV4_SRC:
- case RTE_FLOW_FIELD_IPV4_DST:
- return 32;
- case RTE_FLOW_FIELD_IPV6_DSCP:
- return 6;
- case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
- return 8;
- case RTE_FLOW_FIELD_IPV6_SRC:
- case RTE_FLOW_FIELD_IPV6_DST:
- return 128;
- case RTE_FLOW_FIELD_TCP_PORT_SRC:
- case RTE_FLOW_FIELD_TCP_PORT_DST:
- return 16;
- case RTE_FLOW_FIELD_TCP_SEQ_NUM:
- case RTE_FLOW_FIELD_TCP_ACK_NUM:
- return 32;
- case RTE_FLOW_FIELD_TCP_FLAGS:
- return 6;
- case RTE_FLOW_FIELD_UDP_PORT_SRC:
- case RTE_FLOW_FIELD_UDP_PORT_DST:
- return 16;
- case RTE_FLOW_FIELD_VXLAN_VNI:
- case RTE_FLOW_FIELD_GENEVE_VNI:
- return 24;
- case RTE_FLOW_FIELD_GTP_TEID:
- case RTE_FLOW_FIELD_TAG:
- return 32;
- case RTE_FLOW_FIELD_MARK:
- return 24;
- case RTE_FLOW_FIELD_META:
- case RTE_FLOW_FIELD_POINTER:
- case RTE_FLOW_FIELD_VALUE:
- return 32;
- default:
- MLX5_ASSERT(false);
- }
- return 0;
-}
-
/**
* Validate the generic modify field actions.
* @param[in] dev
if (action_modify_field->width == 0)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "no bits are requested to be modified");
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no bits are requested to be modified");
else if (action_modify_field->width > dst_width ||
action_modify_field->width > src_width)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "cannot modify more bits than"
- " the width of a field");
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "cannot modify more bits than"
+ " the width of a field");
if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
if ((action_modify_field->dst.offset +
action_modify_field->width > dst_width) ||
(action_modify_field->dst.offset % 32))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "destination offset is too big"
- " or not aligned to 4 bytes");
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "destination offset is too big"
+ " or not aligned to 4 bytes");
if (action_modify_field->dst.level &&
action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "cannot modify inner headers");
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "inner header fields modification"
+ " is not supported");
}
if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
if (!attr->transfer && !attr->group)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "modify field action "
- "is not supported for group 0");
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "modify field action is not"
+ " supported for group 0");
if ((action_modify_field->src.offset +
action_modify_field->width > src_width) ||
(action_modify_field->src.offset % 32))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "source offset is too big"
- " or not aligned to 4 bytes");
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "source offset is too big"
+ " or not aligned to 4 bytes");
if (action_modify_field->src.level &&
action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "cannot copy from inner headers");
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "inner header fields modification"
+ " is not supported");
}
if (action_modify_field->dst.field ==
action_modify_field->src.field)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "source and destination fields"
- " cannot be the same");
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "source and destination fields"
+ " cannot be the same");
if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "immediate value or a pointer to it"
- " cannot be used as a destination");
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "immediate value or a pointer to it"
+ " cannot be used as a destination");
if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
action_modify_field->src.field == RTE_FLOW_FIELD_START)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
"modifications of an arbitrary"
" place in a packet is not supported");
if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
"modifications of the 802.1Q Tag"
" Identifier is not supported");
if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
"modifications of the VXLAN Network"
" Identifier is not supported");
if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
"modifications of the GENEVE Network"
" Identifier is not supported");
if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
- action_modify_field->src.field == RTE_FLOW_FIELD_MARK) {
+ action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
+ action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_META) {
if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
!mlx5_flow_ext_mreg_supported(dev))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "cannot modify mark without extended"
- " metadata register support");
+ "cannot modify mark or metadata without"
+ " extended metadata register support");
}
if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
"add and sub operations"
" are not supported");
return (action_modify_field->width / 32) +
return ret;
++actions_n;
break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = flow_dv_validate_action_l2_encap(dev,
+ sub_action_flags,
+ act, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ ++actions_n;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
item_flags, attr,
error))
return -rte_errno;
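+ /* Record that the packet is modified after a sample (mirror) action. */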
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
++actions_n;
break;
error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
++actions_n;
break;
(action_flags, actions, error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
/* Count PCP with push_vlan command. */
action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
break;
actions, error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
/* Count VID with push_vlan command. */
action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
rw_act_num += MLX5_ACT_NUM_MDF_VID;
attr, error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_DECAP;
++actions_n;
break;
actions, item_flags, error);
if (ret < 0)
return ret;
+ if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
+ (action_flags & MLX5_FLOW_ACTION_DECAP))
+ modify_after_mirror = 1;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
++actions_n;
return &cache_resource->entry;
error:
for (idx = 0; idx < resource->num_of_dest; idx++) {
- struct mlx5_flow_sub_actions_idx *act_res =
- &cache_resource->sample_idx[idx];
- if (act_res->rix_hrxq &&
- !mlx5_hrxq_release(dev,
- act_res->rix_hrxq))
- act_res->rix_hrxq = 0;
- if (act_res->rix_encap_decap &&
- !flow_dv_encap_decap_resource_release(dev,
- act_res->rix_encap_decap))
- act_res->rix_encap_decap = 0;
- if (act_res->rix_port_id_action &&
- !flow_dv_port_id_action_resource_release(dev,
- act_res->rix_port_id_action))
- act_res->rix_port_id_action = 0;
- if (act_res->rix_jump &&
- !flow_dv_jump_tbl_resource_release(dev,
- act_res->rix_jump))
- act_res->rix_jump = 0;
+ flow_dv_sample_sub_actions_release(dev,
+ &cache_resource->sample_idx[idx]);
if (dest_attr[idx])
mlx5_free(dest_attr[idx]);
}
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
}
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
/* Save the encap resource before sample */
pre_rix = dev_flow->handle->dvh.rix_encap_decap;
dev_flow->handle->dvh.rix_encap_decap;
sample_act->dr_encap_action =
dev_flow->dv.encap_decap->action;
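+ /* The sample resource owns the encap/decap now; clear the handle index to avoid a double release. */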
+ dev_flow->handle->dvh.rix_encap_decap = 0;
}
if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
normal_idx++;
dev_flow->handle->rix_port_id_action;
sample_act->dr_port_id_action =
dev_flow->dv.port_id_action->action;
+ dev_flow->handle->rix_port_id_action = 0;
}
if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
normal_idx++;
static int
__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
const uint64_t hash_fields,
- const int tunnel,
uint32_t hrxq_idx)
{
- uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+ uint32_t *hrxqs = action->hrxq;
switch (hash_fields & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_SRC_ONLY:
hrxqs[0] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV4_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
hrxqs[1] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV4_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
hrxqs[2] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV6:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_SRC_ONLY:
hrxqs[3] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV6_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
hrxqs[4] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_IPV6_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
hrxqs[5] = hrxq_idx;
return 0;
case MLX5_RSS_HASH_NONE:
*/
static uint32_t
__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
- const uint64_t hash_fields,
- const int tunnel)
+ const uint64_t hash_fields)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss =
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
- const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
- shared_rss->hrxq_tunnel;
+ const uint32_t *hrxqs = shared_rss->hrxq;
switch (hash_fields & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_SRC_ONLY:
return hrxqs[0];
case MLX5_RSS_HASH_IPV4_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
return hrxqs[1];
case MLX5_RSS_HASH_IPV4_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
return hrxqs[2];
case MLX5_RSS_HASH_IPV6:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_SRC_ONLY:
return hrxqs[3];
case MLX5_RSS_HASH_IPV6_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
return hrxqs[4];
case MLX5_RSS_HASH_IPV6_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
return hrxqs[5];
case MLX5_RSS_HASH_NONE:
return hrxqs[6];
default:
return 0;
}
}
/**
n = dv->actions_n;
if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
if (dv->transfer) {
- dv->actions[n++] = priv->sh->esw_drop_action;
+ MLX5_ASSERT(priv->sh->dr_drop_action);
+ dv->actions[n++] = priv->sh->dr_drop_action;
} else {
+#ifdef HAVE_MLX5DV_DR
+ /* DR supports drop action placeholder. */
+ MLX5_ASSERT(priv->sh->dr_drop_action);
+ dv->actions[n++] = priv->sh->dr_drop_action;
+#else
+ /* For DV we use the explicit drop queue. */
MLX5_ASSERT(priv->drop_queue.hrxq);
dv->actions[n++] =
priv->drop_queue.hrxq->action;
+#endif
}
} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
!dv_h->rix_sample && !dv_h->rix_dest_array)) {
hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
rss_desc->shared_rss,
- dev_flow->hash_fields,
- !!(dh->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ dev_flow->hash_fields);
if (hrxq_idx)
hrxq = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_HRXQ],
return;
switch (handle->fate_action) {
case MLX5_FLOW_FATE_QUEUE:
- mlx5_hrxq_release(dev, handle->rix_hrxq);
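+ /* An hrxq owned by a sample or dest-array action is released together with that action. */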
+ if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
+ mlx5_hrxq_release(dev, handle->rix_hrxq);
break;
case MLX5_FLOW_FATE_JUMP:
flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
struct mlx5_shared_action_rss *shared_rss)
{
- return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq) +
- __flow_dv_hrxqs_release(dev, &shared_rss->hrxq_tunnel);
+ return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
+}
+
+/**
+ * Adjust the L3/L4 hash value of a pre-created shared RSS hrxq according
+ * to user input.
+ *
+ * Only one hash value is available for each L3+L4 combination.
+ * For example, MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY and
+ * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
+ * the same slot in mlx5_rss_hash_fields.
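+ * For instance, with origin.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY
+ * the MLX5_RSS_HASH_IPV4 slot is narrowed to IBV_RX_HASH_SRC_IPV4.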
+ *
+ * @param[in] rss
+ * Pointer to the shared action RSS conf.
+ * @param[in, out] hash_field
+ * Hash field value to be adjusted.
+ *
+ * @return
+ * void
+ */
+static void
+__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
+ uint64_t *hash_field)
+{
+ uint64_t rss_types = rss->origin.types;
+
+ switch (*hash_field & ~IBV_RX_HASH_INNER) {
+ case MLX5_RSS_HASH_IPV4:
+ if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+ *hash_field &= ~MLX5_RSS_HASH_IPV4;
+ if (rss_types & ETH_RSS_L3_DST_ONLY)
+ *hash_field |= IBV_RX_HASH_DST_IPV4;
+ else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ *hash_field |= IBV_RX_HASH_SRC_IPV4;
+ else
+ *hash_field |= MLX5_RSS_HASH_IPV4;
+ }
+ return;
+ case MLX5_RSS_HASH_IPV6:
+ if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+ *hash_field &= ~MLX5_RSS_HASH_IPV6;
+ if (rss_types & ETH_RSS_L3_DST_ONLY)
+ *hash_field |= IBV_RX_HASH_DST_IPV6;
+ else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ *hash_field |= IBV_RX_HASH_SRC_IPV6;
+ else
+ *hash_field |= MLX5_RSS_HASH_IPV6;
+ }
+ return;
+ case MLX5_RSS_HASH_IPV4_UDP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_UDP:
+ if (rss_types & ETH_RSS_UDP) {
+ *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
+ if (rss_types & ETH_RSS_L4_DST_ONLY)
+ *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
+ else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
+ else
+ *hash_field |= MLX5_UDP_IBV_RX_HASH;
+ }
+ return;
+ case MLX5_RSS_HASH_IPV4_TCP:
+ /* fall-through. */
+ case MLX5_RSS_HASH_IPV6_TCP:
+ if (rss_types & ETH_RSS_TCP) {
+ *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
+ if (rss_types & ETH_RSS_L4_DST_ONLY)
+ *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
+ else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
+ else
+ *hash_field |= MLX5_TCP_IBV_RX_HASH;
+ }
+ return;
+ default:
+ return;
+ }
+}
/**
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
uint32_t hrxq_idx;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
- int tunnel;
+ int tunnel = 0;
- for (tunnel = 0; tunnel < 2; tunnel++) {
- rss_desc.tunnel = tunnel;
- rss_desc.hash_fields = hash_fields;
- hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
- if (!hrxq_idx) {
- rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot get hash queue");
- goto error_hrxq_new;
- }
- err = __flow_dv_action_rss_hrxq_set
- (shared_rss, hash_fields, tunnel, hrxq_idx);
- MLX5_ASSERT(!err);
+ __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
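+ /* RSS level above 1 means hashing on inner headers for all pre-created hrxqs. */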
+ if (shared_rss->origin.level > 1) {
+ hash_fields |= IBV_RX_HASH_INNER;
+ tunnel = 1;
}
+ rss_desc.tunnel = tunnel;
+ rss_desc.hash_fields = hash_fields;
+ hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
+ if (!hrxq_idx) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error_hrxq_new;
+ }
+ err = __flow_dv_action_rss_hrxq_set
+ (shared_rss, hash_fields, hrxq_idx);
+ MLX5_ASSERT(!err);
}
return 0;
error_hrxq_new:
*/
static uint32_t
__flow_dv_action_rss_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action_rss *rss,
struct rte_flow_error *error)
{
"cannot allocate resource memory");
goto error_rss_init;
}
- if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
+ if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
rte_flow_error_set(error, E2BIG,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"rss action number out of range");
}
/**
- * Create shared action, lock free,
+ * Create indirect action, lock free,
* (mutex should be acquired by caller).
* Dispatcher for action type specific call.
*
* @param[in] conf
* Indirect action configuration.
* @param[in] action
- * Action specification used to create shared action.
+ * Action specification used to create indirect action.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
* A valid indirect action handle in case of success, NULL otherwise and
* rte_errno is set.
*/
-static struct rte_flow_shared_action *
+static struct rte_flow_action_handle *
flow_dv_action_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
struct rte_flow_error *err)
{
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_RSS:
ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
- idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
- MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
+ MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
- idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
- MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
+ MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
if (ret) {
struct mlx5_aso_age_action *aso_age =
flow_aso_age_get_by_idx(dev, ret);
NULL, "action type not supported");
break;
}
- return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
+ return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
}
/**
- * Destroy the shared action.
+ * Destroy the indirect action.
* Release action related resources on the NIC and the memory.
* Lock free, (mutex should be acquired by caller).
* Dispatcher for action type specific call.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] action
- * The shared action object to be removed.
+ * @param[in] handle
+ * The indirect action object handle to be removed.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
*/
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
+ struct rte_flow_action_handle *handle,
struct rte_flow_error *error)
{
- uint32_t act_idx = (uint32_t)(uintptr_t)action;
- uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
- uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
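+ /* The handle packs the action type above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the per-type index below it. */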
+ uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
int ret;
switch (type) {
- case MLX5_SHARED_ACTION_TYPE_RSS:
+ case MLX5_INDIRECT_ACTION_TYPE_RSS:
return __flow_dv_action_rss_release(dev, idx, error);
- case MLX5_SHARED_ACTION_TYPE_AGE:
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
ret = flow_dv_aso_age_release(dev, idx);
if (ret)
/*
* In this case, the last flow holding a reference will
* actually release the age action.
*/
- DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
+ DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
" released with references %d.", idx, ret);
return 0;
default:
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] action
- * The shared action object to be updated.
- * @param[in] action_conf
- * Action specification used to modify *action*.
- * *action_conf* should be of type correlating with type of the *action*,
- * otherwise considered as invalid.
+ * @param[in] handle
+ * The indirect action object handle to be updated.
+ * @param[in] update
+ * Action specification used to modify the action pointed to by *handle*.
+ * *update* could be of the same type as the action pointed to by the
+ * *handle* argument, or some other structure like a wrapper, depending on
+ * the indirect action type.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
*/
static int
flow_dv_action_update(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
- const void *action_conf,
+ struct rte_flow_action_handle *handle,
+ const void *update,
struct rte_flow_error *err)
{
- uint32_t act_idx = (uint32_t)(uintptr_t)action;
- uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
- uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+ uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ const void *action_conf;
switch (type) {
- case MLX5_SHARED_ACTION_TYPE_RSS:
+ case MLX5_INDIRECT_ACTION_TYPE_RSS:
+ action_conf = ((const struct rte_flow_action *)update)->conf;
return __flow_dv_action_rss_update(dev, idx, action_conf, err);
default:
return rte_flow_error_set(err, ENOTSUP,
static int
flow_dv_action_query(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action *action, void *data,
+ const struct rte_flow_action_handle *handle, void *data,
struct rte_flow_error *error)
{
struct mlx5_age_param *age_param;
struct rte_flow_query_age *resp;
- uint32_t act_idx = (uint32_t)(uintptr_t)action;
- uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
- uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+ uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
switch (type) {
- case MLX5_SHARED_ACTION_TYPE_AGE:
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
resp = data;
resp->aged = __atomic_load_n(&age_param->state,
&actions[0]);
if (ret)
goto err;
- actions[1] = priv->drop_queue.hrxq->action;
+ actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
+ priv->drop_queue.hrxq->action;
dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
&matcher);
}
/**
- * Validate shared action.
+ * Validate indirect action.
* Dispatcher for action type specific validation.
*
* @param[in] dev
* @param[in] conf
* Indirect action configuration.
* @param[in] action
- * The shared action object to validate.
+ * The indirect action object to validate.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
*/
static int
flow_dv_action_validate(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
struct rte_flow_error *err)
{