};
static void
-mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item, uint64_t *flags)
+mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
+ uint8_t next_protocol, uint64_t *item_flags,
+ int *tunnel)
{
- uint8_t next_protocol = 0xFF;
-
- if (item->mask != NULL) {
- switch (item->type) {
- case RTE_FLOW_ITEM_TYPE_IPV4:
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (item->spec))->hdr.next_proto_id;
- next_protocol &=
- ((const struct rte_flow_item_ipv4 *)
- (item->mask))->hdr.next_proto_id;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- (item->spec))->hdr.proto;
- next_protocol &=
- ((const struct rte_flow_item_ipv6 *)
- (item->mask))->hdr.proto;
- break;
- default:
- break;
- }
+ assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
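+	/* An outer IP header whose masked next protocol is IPPROTO_IPIP or
+	 * IPPROTO_IPV6 carries another IP header, i.e. an IP-in-IP tunnel.
+	 */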
+ if (next_protocol == IPPROTO_IPIP) {
+ *item_flags |= MLX5_FLOW_LAYER_IPIP;
+ *tunnel = 1;
+ }
+ if (next_protocol == IPPROTO_IPV6) {
+ *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
+ *tunnel = 1;
}
- if (next_protocol == IPPROTO_IPIP)
- *flags |= MLX5_FLOW_LAYER_IPIP;
- if (next_protocol == IPPROTO_IPV6)
- *flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
}
/**
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ const struct rte_flow_action_raw_encap *raw_encap =
+ (const struct rte_flow_action_raw_encap *)action->conf;
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
+ if (!raw_encap->size || !raw_encap->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "raw encap data cannot be empty");
return 0;
}
/* VLAN skipping */
while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
- next_hdr += sizeof(struct rte_vlan_hdr);
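+	/* Read the VLAN header at the current offset before advancing
+	 * past it.
+	 */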
vlan = (struct rte_vlan_hdr *)next_hdr;
proto = RTE_BE16(vlan->eth_proto);
+ next_hdr += sizeof(struct rte_vlan_hdr);
}
	/* HW calculates IPv4 csum; no need to proceed. */
* Validate jump action.
*
* @param[in] action
- * Pointer to the modify action.
+ * Pointer to the jump action.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
* @param[in] group
* The group of the current flow.
* @param[out] error
*/
static int
flow_dv_validate_action_jump(const struct rte_flow_action *action,
+ uint64_t action_flags,
uint32_t group,
struct rte_flow_error *error)
{
- if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
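+	/* Jump is a fate action and cannot share a flow with another
+	 * fate action (e.g. drop, queue or RSS).
+	 */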
+ if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
+ MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (!action->conf)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "action configuration not set");
if (!(priv->representor || priv->master))
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "E-Switch configurationd can only be"
+ NULL, "E-Switch configuration can only be"
" done by a master or a representor device");
if (attributes->egress)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
NULL, "group must be smaller than "
- RTE_STR(MLX5_MAX_FDB_TABLES));
+ RTE_STR(MLX5_MAX_TABLES_FDB));
}
if (!(attributes->egress ^ attributes->ingress))
return rte_flow_error_set(error, ENOTSUP,
MLX5_FLOW_LAYER_OUTER_VLAN;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
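+			/* next_protocol holds the masked protocol of the
+			 * preceding IP header; mlx5_flow_tunnel_ip_check()
+			 * uses it to flag IP-in-IP encapsulation.
+			 */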
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
NULL, error);
if (ret < 0)
/* Reset for inner layer. */
next_protocol = 0xff;
}
- mlx5_flow_tunnel_ip_check(items, &last_item);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
NULL, error);
if (ret < 0)
/* Reset for inner layer. */
next_protocol = 0xff;
}
- mlx5_flow_tunnel_ip_check(items, &last_item);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
ret = flow_dv_validate_action_jump(actions,
+ action_flags,
attr->group, error);
if (ret)
return ret;
{
const struct rte_flow_item_vlan *vlan_m = item->mask;
const struct rte_flow_item_vlan *vlan_v = item->spec;
- const struct rte_flow_item_vlan nic_mask = {
- .tci = RTE_BE16(0x0fff),
- .inner_type = RTE_BE16(0xffff),
- };
void *headers_m;
void *headers_v;
uint16_t tci_m;
if (!vlan_v)
return;
if (!vlan_m)
- vlan_m = &nic_mask;
+ vlan_m = &rte_flow_item_vlan_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
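+	/* Match the inner ethertype: program the mask as given and the
+	 * value restricted by that mask.
+	 */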
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
/**
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
-#ifdef HAVE_MLX5DV_DR
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
-#endif
return match_criteria_enable;
}
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate matcher memory");
- tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
+ tbl = flow_dv_tbl_resource_get(dev, matcher->group,
matcher->egress, matcher->transfer,
error);
if (!tbl) {
uint32_t modify_action_position = UINT32_MAX;
void *match_mask = matcher.mask.buf;
void *match_value = dev_flow->dv.value.buf;
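+	/* Masked next protocol of the last seen IP header; 0xff means
+	 * none, which keeps the IP-in-IP tunnel check inactive.
+	 */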
+ uint8_t next_protocol = 0xff;
flow->group = attr->group;
if (attr->transfer)
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
jump_data = action->conf;
- tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
- MLX5_GROUP_FACTOR,
+ tbl = flow_dv_tbl_resource_get(dev, jump_data->group,
attr->egress,
attr->transfer, error);
if (!tbl)
MLX5_FLOW_LAYER_OUTER_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
items, tunnel, attr->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
MLX5_IPV4_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- mlx5_flow_tunnel_ip_check(items, &last_item);
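+			/* Keep the masked next protocol so a following IP
+			 * item can be recognized as an inner (tunneled)
+			 * header.
+			 */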
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
items, tunnel, attr->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
MLX5_IPV6_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- mlx5_flow_tunnel_ip_check(items, &last_item);
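+			/* Same bookkeeping as for IPv4: carry the masked
+			 * protocol to the tunnel check of the next IP item.
+			 */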
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(match_mask, match_value,