#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif
-#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
- sizeof(struct rte_flow_item_ipv4))
/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
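/*
 * The VLAN TCI carries PCP in bits 15:13, DEI in bit 12 and VID in
 * bits 11:0. For example, a TCI of 0xE001 holds PCP 7 and VID 1:
 * (0xE001 & MLX5DV_FLOW_VLAN_PCP_MASK) >> MLX5DV_FLOW_VLAN_PCP_SHIFT == 7.
 */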
* Pointer to item specification.
* @param[out] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether the action is applied after tunnel decapsulation.
*/
static void
-flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
+flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
+ struct mlx5_flow *dev_flow, bool tunnel_decap)
{
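+ /*
+ * union flow_dv_attr overlays the valid/ipv4/ipv6/tcp/udp bit-fields
+ * with a plain integer view ("attr"), so the "attr->attr = 0"
+ * assignments below clear all detected-layer flags at once.
+ */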
+ /*
+ * If layers is already initialized, this dev_flow is a suffix flow
+ * and the layer flags were set by the prefix flow. Use the layer
+ * flags from the prefix flow, since the suffix flow may lack the
+ * user-defined items after the flow was split.
+ */
+ if (dev_flow->layers) {
+ if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ attr->ipv4 = 1;
+ else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+ attr->ipv6 = 1;
+ if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+ attr->tcp = 1;
+ else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+ attr->udp = 1;
+ attr->valid = 1;
+ return;
+ }
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ uint8_t next_protocol = 0xff;
switch (item->type) {
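+ /*
+ * Tunnel items combined with decapsulation reset the attributes:
+ * the layers collected so far describe the outer headers, which
+ * are removed, so detection restarts from the inner items that
+ * follow.
+ */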
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ if (tunnel_decap)
+ attr->attr = 0;
+ break;
case RTE_FLOW_ITEM_TYPE_IPV4:
if (!attr->ipv6)
attr->ipv4 = 1;
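+ /*
+ * IP-over-IP has no dedicated flow item; detect it from the
+ * masked next protocol field and treat it as a tunnel too.
+ */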
+ if (item->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ item->mask)->hdr.next_proto_id)
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (item->spec))->hdr.next_proto_id &
+ ((const struct rte_flow_item_ipv4 *)
+ (item->mask))->hdr.next_proto_id;
+ if ((next_protocol == IPPROTO_IPIP ||
+ next_protocol == IPPROTO_IPV6) && tunnel_decap)
+ attr->attr = 0;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
if (!attr->ipv4)
attr->ipv6 = 1;
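+ /* Apply the same IP-over-IP detection to the IPv6 item. */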
+ if (item->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ item->mask)->hdr.proto)
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ (item->spec))->hdr.proto &
+ ((const struct rte_flow_item_ipv6 *)
+ (item->mask))->hdr.proto;
+ if ((next_protocol == IPPROTO_IPIP ||
+ next_protocol == IPPROTO_IPV6) && tunnel_decap)
+ attr->attr = 0;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
if (!attr->tcp)
const struct rte_flow_action_of_set_vlan_vid *conf =
(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
int i = resource->actions_num;
- struct mlx5_modification_cmd *actions = &resource->actions[i];
+ struct mlx5_modification_cmd *actions = resource->actions;
struct field_modify_info *field = modify_vlan_out_first_vid;
if (i >= MLX5_MAX_MODIFY_NUM)
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether the action is applied after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
const struct rte_flow_item *items,
- union flow_dv_attr *attr,
- struct rte_flow_error *error)
+ union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
+ bool tunnel_decap, struct rte_flow_error *error)
{
const struct rte_flow_action_set_tp *conf =
(const struct rte_flow_action_set_tp *)(action->conf);
struct field_modify_info *field;
if (!attr->valid)
- flow_dv_attr_init(items, attr);
+ flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->udp) {
memset(&udp, 0, sizeof(udp));
memset(&udp_mask, 0, sizeof(udp_mask));
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether the action is applied after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
const struct rte_flow_item *items,
- union flow_dv_attr *attr,
- struct rte_flow_error *error)
+ union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
+ bool tunnel_decap, struct rte_flow_error *error)
{
const struct rte_flow_action_set_ttl *conf =
(const struct rte_flow_action_set_ttl *)(action->conf);
struct field_modify_info *field;
if (!attr->valid)
- flow_dv_attr_init(items, attr);
+ flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->ipv4) {
memset(&ipv4, 0, sizeof(ipv4));
memset(&ipv4_mask, 0, sizeof(ipv4_mask));
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether the action is applied after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
flow_dv_convert_action_modify_dec_ttl
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_item *items,
- union flow_dv_attr *attr,
- struct rte_flow_error *error)
+ union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
+ bool tunnel_decap, struct rte_flow_error *error)
{
struct rte_flow_item item;
struct rte_flow_item_ipv4 ipv4;
struct field_modify_info *field;
if (!attr->valid)
- flow_dv_attr_init(items, attr);
+ flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->ipv4) {
memset(&ipv4, 0, sizeof(ipv4));
memset(&ipv4_mask, 0, sizeof(ipv4_mask));
if (items == NULL)
return;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
- items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
- ;
- if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int type = items->type;
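+ /* "int" also covers the negative MLX5 private item types. */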
+
+ if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
+ type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
+ break;
+ }
+ if (items->type != RTE_FLOW_ITEM_TYPE_END) {
const struct rte_flow_item_vlan *vlan_m = items->mask;
const struct rte_flow_item_vlan *vlan_v = items->spec;
/* Only full match values are accepted */
if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
- vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK;
+ vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
vlan->vlan_tci |=
rte_be_to_cpu_16(vlan_v->tci &
MLX5DV_FLOW_VLAN_PCP_MASK_BE);
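/*
 * E.g. with vlan_tci 0x6005 (PCP 3, VID 5) and a matched PCP of 7,
 * clearing the PCP bits yields 0x0005 and OR-ing the new PCP gives
 * 0xE005; the VID bits are preserved.
 */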
{
const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
- if (attr->ingress)
+ if (!attr->transfer && attr->ingress)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
NULL,
* Holds the actions detected until now.
* @param[in] action
* Pointer to the action structure.
- * @param[in] attr
- * Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
const struct rte_flow_action *action,
- const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
if (!(action->conf))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"configuration cannot be null");
- if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can only have a single encap or"
- " decap action in a flow");
- if (!attr->transfer && attr->ingress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "encap action not supported for "
- "ingress");
+ "can only have a single encap action "
+ "in a flow");
return 0;
}
/**
- * Validate the L2 decap action.
+ * Validate a decap action.
*
* @param[in] action_flags
* Holds the actions detected until now.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_l2_decap(uint64_t action_flags,
+flow_dv_validate_action_decap(uint64_t action_flags,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
- if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
- return rte_flow_error_set(error, EINVAL,
+ if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can only have a single encap or"
- " decap action in a flow");
+ action_flags &
+ MLX5_FLOW_ACTION_DECAP ? "can only "
+ "have a single decap action" : "decap "
+ "after encap is not supported");
if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
return 0;
}
-/**
- * Validate the raw encap action.
- *
- * @param[in] action_flags
- * Holds the actions detected until now.
- * @param[in] action
- * Pointer to the encap action.
- * @param[in] attr
- * Pointer to flow attributes
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_dv_validate_action_raw_encap(uint64_t action_flags,
- const struct rte_flow_action *action,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
-{
- const struct rte_flow_action_raw_encap *raw_encap =
- (const struct rte_flow_action_raw_encap *)action->conf;
- if (!(action->conf))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "configuration cannot be null");
- if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can only have a single encap"
- " action in a flow");
- /* encap without preceding decap is not supported for ingress */
- if (!attr->transfer && attr->ingress &&
- !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "encap action not supported for "
- "ingress");
- if (!raw_encap->size || !raw_encap->data)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "raw encap data cannot be empty");
- return 0;
-}
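+/*
+ * Stand-in for a raw decap action that carries no configuration, so the
+ * validation below can treat it uniformly as a zero-sized decap.
+ */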
+static const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
/**
- * Validate the raw decap action.
+ * Validate the raw encap and decap actions.
*
- * @param[in] action_flags
- * Holds the actions detected until now.
- * @param[in] action
+ * @param[in] decap
+ * Pointer to the decap action.
+ * @param[in] encap
* Pointer to the encap action.
* @param[in] attr
* Pointer to flow attributes
+ * @param[in, out] action_flags
+ * Holds the actions detected until now.
+ * @param[out] actions_n
+ * Pointer to the number of actions counter.
* @param[out] error
* Pointer to error structure.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_raw_decap(uint64_t action_flags,
- const struct rte_flow_action *action,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
+flow_dv_validate_action_raw_encap_decap
+ (const struct rte_flow_action_raw_decap *decap,
+ const struct rte_flow_action_raw_encap *encap,
+ const struct rte_flow_attr *attr, uint64_t *action_flags,
+ int *actions_n, struct rte_flow_error *error)
{
- const struct rte_flow_action_raw_decap *decap = action->conf;
+ int ret;
- if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't have encap action before"
- " decap action");
- if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
+ if (encap && (!encap->size || !encap->data))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can only have a single decap"
- " action in a flow");
- /* decap action is valid on egress only if it is followed by encap */
- if (attr->egress && decap &&
- decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL, "decap action not supported"
- " for egress");
- } else if (decap && decap->size > MLX5_ENCAPSULATION_DECISION_SIZE &&
- (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) {
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "can't have decap action "
- "after modify action");
+ "raw encap data cannot be empty");
+ if (decap && encap) {
+ if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
+ encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
+ /* L3 encap. */
+ decap = NULL;
+ else if (encap->size <=
+ MLX5_ENCAPSULATION_DECISION_SIZE &&
+ decap->size >
+ MLX5_ENCAPSULATION_DECISION_SIZE)
+ /* L3 decap. */
+ encap = NULL;
+ else if (encap->size >
+ MLX5_ENCAPSULATION_DECISION_SIZE &&
+ decap->size >
+ MLX5_ENCAPSULATION_DECISION_SIZE)
+ /* 2 L2 actions: encap and decap. */
+ ;
+ else
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "unsupported too small "
+ "raw decap and too small raw "
+ "encap combination");
+ }
+ if (decap) {
+ ret = flow_dv_validate_action_decap(*action_flags, attr, error);
+ if (ret < 0)
+ return ret;
+ *action_flags |= MLX5_FLOW_ACTION_DECAP;
+ ++(*actions_n);
+ }
+ if (encap) {
+ if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "small raw encap size");
+ if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "more than one encap action");
+ *action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ ++(*actions_n);
}
return 0;
}
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "action configuration not set");
- if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't have encap action before"
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
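+ /*
+ * A preceding decap exposes the inner headers, so the modify
+ * action must be validated against the inner layer items
+ * rather than the outer ones.
+ */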
- if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L4))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no transport layer "
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no TCP item in"
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no TCP item in"
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L3))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
int actions_n = 0;
uint8_t item_ipv6_proto = 0;
const struct rte_flow_item *gre_item = NULL;
- struct rte_flow_item_tcp nic_tcp_mask = {
+ const struct rte_flow_action_raw_decap *decap;
+ const struct rte_flow_action_raw_encap *encap;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_item_tcp nic_tcp_mask = {
.hdr = {
.tcp_flags = 0xFF,
.src_port = RTE_BE16(UINT16_MAX),
.dst_port = RTE_BE16(UINT16_MAX),
}
};
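+ /*
+ * Unlike the default rte_flow item masks, the NIC masks below also
+ * allow matching on IPv4 TOS, protocol and TTL, and on IPv6
+ * vtc_flow, protocol and hop limit.
+ */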
+ const struct rte_flow_item_ipv4 nic_ipv4_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ .time_to_live = 0xff,
+ },
+ };
+ const struct rte_flow_item_ipv6 nic_ipv6_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
+ uint16_t queue_index = 0xFFFF;
if (items == NULL)
return -1;
&item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
last_item,
- ether_type, NULL,
+ ether_type,
+ &nic_ipv4_mask,
error);
if (ret < 0)
return ret;
&item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
last_item,
- ether_type, NULL,
+ ether_type,
+ &nic_ipv6_mask,
error);
if (ret < 0)
return ret;
error);
if (ret < 0)
return ret;
+ item_ipv6_proto = IPPROTO_ICMPV6;
last_item = MLX5_FLOW_LAYER_ICMP6;
break;
case RTE_FLOW_ITEM_TYPE_TAG:
attr, error);
if (ret < 0)
return ret;
+ queue_index = ((const struct rte_flow_action_queue *)
+ (actions->conf))->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = actions->conf;
ret = mlx5_flow_validate_action_rss(actions,
action_flags, dev,
attr, item_flags,
error);
if (ret < 0)
return ret;
+ if (rss != NULL && rss->queue_num)
+ queue_index = rss->queue[0];
action_flags |= MLX5_FLOW_ACTION_RSS;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
ret = flow_dv_validate_action_l2_encap(action_flags,
- actions, attr,
- error);
+ actions, error);
if (ret < 0)
return ret;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
- MLX5_FLOW_ACTION_VXLAN_ENCAP :
- MLX5_FLOW_ACTION_NVGRE_ENCAP;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
- ret = flow_dv_validate_action_l2_decap(action_flags,
- attr, error);
+ ret = flow_dv_validate_action_decap(action_flags, attr,
+ error);
if (ret < 0)
return ret;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
- MLX5_FLOW_ACTION_VXLAN_DECAP :
- MLX5_FLOW_ACTION_NVGRE_DECAP;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
- ret = flow_dv_validate_action_raw_encap(action_flags,
- actions, attr,
- error);
+ ret = flow_dv_validate_action_raw_encap_decap
+ (NULL, actions->conf, attr, &action_flags,
+ &actions_n, error);
if (ret < 0)
return ret;
- action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
- ++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
- ret = flow_dv_validate_action_raw_decap(action_flags,
- actions, attr,
- error);
+ decap = actions->conf;
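+ /*
+ * Look ahead, skipping VOID actions, to learn whether the decap
+ * is immediately followed by a raw encap and must be validated
+ * as a combined action.
+ */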
+ while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
+ if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ encap = NULL;
+ actions--;
+ } else {
+ encap = actions->conf;
+ }
+ ret = flow_dv_validate_action_raw_encap_decap
+ (decap ? decap : &empty_decap, encap,
+ attr, &action_flags, &actions_n,
+ error);
if (ret < 0)
return ret;
- action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
- ++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
actions,
"no fate action is found");
}
+ /*
+ * Continue validation for Xcap actions. The checks below are skipped
+ * when the flow is directed to a hairpin queue, which supports the
+ * encap and decap combination.
+ */
+ if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
+ MLX5_FLOW_XCAP_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap and decap "
+ "combination aren't supported");
+ if (!attr->transfer && attr->ingress && (action_flags &
+ MLX5_FLOW_ACTION_ENCAP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ }
return 0;
}
/* The value must be in the range of the mask. */
for (i = 0; i < sizeof(eth_m->dst); ++i)
l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
+ if (eth_v->type) {
+ /* When ethertype is present set mask for tagged VLAN. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ /* Set value for tagged VLAN if ethertype is 802.1Q. */
+ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
+ eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
+ 1);
+ /* Return here to avoid setting match on ethertype. */
+ return;
+ }
+ }
+ /*
+ * HW supports match on one Ethertype, the Ethertype following the last
+ * VLAN tag of the packet (see PRM).
+ * Set match on ethertype only if ETH header is not followed by VLAN.
+ */
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
rte_be_to_cpu_16(eth_m->type));
l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
const struct rte_flow_item *item,
+ const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
.dst_addr = RTE_BE32(0xffffffff),
.type_of_service = 0xff,
.next_proto_id = 0xff,
+ .time_to_live = 0xff,
},
};
void *headers_m;
ipv4_m->hdr.next_proto_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv4_m->hdr.time_to_live);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
+ /*
+ * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
}
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
const struct rte_flow_item *item,
+ const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
ipv6_m->hdr.proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
ipv6_v->hdr.proto & ipv6_m->hdr.proto);
+ /* Hop limit. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv6_m->hdr.hop_limits);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
+ /*
+ * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
}
/**
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
+ /* GRE K bit must be on and should already be validated */
+ MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
if (!key_v)
return;
if (!key_m)
key_m = &gre_key_default_mask;
- /* GRE K bit must be on and should already be validated */
- MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
- MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
rte_be_to_cpu_32(*key_m) >> 8);
MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
return;
if (!icmp6_m)
icmp6_m = &rte_flow_item_icmp6_mask;
+ /*
+ * Force the flow to match only non-fragmented IPv6 ICMPv6 packets.
+ * If only the protocol is specified, no need to match the frag.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
icmp6_v->type & icmp6_m->type);
return;
if (!icmp_m)
icmp_m = &rte_flow_item_icmp_mask;
+ /*
+ * Force the flow to match only non-fragmented IPv4 ICMP packets.
+ * If only the protocol is specified, no need to match the frag.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
icmp_m->hdr.icmp_type);
MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
- MLX5_FLOW_ACTION_VXLAN_ENCAP :
- MLX5_FLOW_ACTION_NVGRE_ENCAP;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
- MLX5_FLOW_ACTION_VXLAN_DECAP :
- MLX5_FLOW_ACTION_NVGRE_DECAP;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
/* Handle encap with preceding decap. */
- if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
+ if (action_flags & MLX5_FLOW_ACTION_DECAP) {
if (flow_dv_create_action_raw_encap
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
}
- action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
- /* Check if this decap is followed by encap. */
- for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
- action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
- action++) {
- }
- /* Handle decap only if it isn't followed by encap. */
+ while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
if (flow_dv_create_action_l2_decap
(dev, dev_flow, attr->transfer, error))
dev_flow->dv.encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
- action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
jump_data = action->conf;
case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
if (flow_dv_convert_action_modify_tp
(mhdr_res, actions, items,
- &flow_attr, error))
+ &flow_attr, dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
break;
case RTE_FLOW_ACTION_TYPE_DEC_TTL:
if (flow_dv_convert_action_modify_dec_ttl
- (mhdr_res, items, &flow_attr, error))
+ (mhdr_res, items, &flow_attr, dev_flow,
+ !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
break;
case RTE_FLOW_ACTION_TYPE_SET_TTL:
if (flow_dv_convert_action_modify_ttl
- (mhdr_res, actions, items,
- &flow_attr, error))
+ (mhdr_res, actions, items, &flow_attr,
+ dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TTL;
break;
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel,
+ items, item_flags, tunnel,
dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel,
+ items, item_flags, tunnel,
dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
case RTE_FLOW_ITEM_TYPE_GRE:
flow_dv_translate_item_gre(match_mask, match_value,
items, tunnel);
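+ /*
+ * Tunnel items keep the outer L4 match priority unless RSS
+ * targets the inner packet (level >= 2), in which case the L2
+ * priority is used.
+ */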
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_NVGRE:
flow_dv_translate_item_nvgre(match_mask, match_value,
items, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(match_mask, match_value,
items, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
flow_dv_translate_item_vxlan_gpe(match_mask,
match_value, items,
tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
flow_dv_translate_item_geneve(match_mask, match_value,
items, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_MARK:
case RTE_FLOW_ITEM_TYPE_GTP:
flow_dv_translate_item_gtp(match_mask, match_value,
items, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GTP;
break;
default:
MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
#endif
- dev_flow->layers = item_flags;
+ /*
+ * Layers may already be initialized from the prefix flow if this
+ * dev_flow is the suffix flow.
+ */
+ dev_flow->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow);
/* Register matcher. */