uint32_t attr;
};
-#define MLX5_FLOW_IPV4_LRO (1 << 0)
-#define MLX5_FLOW_IPV6_LRO (1 << 1)
-
/**
* Initialize flow attributes structure according to flow items' types.
*
};
static void
-mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item, uint64_t *flags)
+mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
+ uint8_t next_protocol, uint64_t *item_flags,
+ int *tunnel)
{
- uint8_t next_protocol = 0xFF;
-
- if (item->mask != NULL) {
- switch (item->type) {
- case RTE_FLOW_ITEM_TYPE_IPV4:
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (item->spec))->hdr.next_proto_id;
- next_protocol &=
- ((const struct rte_flow_item_ipv4 *)
- (item->mask))->hdr.next_proto_id;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- (item->spec))->hdr.proto;
- next_protocol &=
- ((const struct rte_flow_item_ipv6 *)
- (item->mask))->hdr.proto;
- break;
- default:
- break;
- }
+ assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+ if (next_protocol == IPPROTO_IPIP) {
+ *item_flags |= MLX5_FLOW_LAYER_IPIP;
+ *tunnel = 1;
+ }
+ if (next_protocol == IPPROTO_IPV6) {
+ *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
+ *tunnel = 1;
}
- if (next_protocol == IPPROTO_IPIP)
- *flags |= MLX5_FLOW_LAYER_IPIP;
- if (next_protocol == IPPROTO_IPV6)
- *flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
}
/**
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ const struct rte_flow_action_raw_encap *raw_encap =
+ (const struct rte_flow_action_raw_encap *)action->conf;
if (!(action->conf))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
+ if (!raw_encap->size || !raw_encap->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "raw encap data cannot be empty");
return 0;
}
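/*
 * Illustrative sketch only, not part of this patch: a raw encap action
 * configuration that satisfies the size/data check added above. The buffer
 * contents are application-defined encapsulation headers; the names below
 * (encap_hdr, raw_conf, encap_action) are placeholders. Assumes <rte_flow.h>.
 */
static uint8_t encap_hdr[64]; /* Ethernet + IP + UDP + tunnel header, filled by the application. */
static const struct rte_flow_action_raw_encap raw_conf = {
	.data = encap_hdr,
	.size = sizeof(encap_hdr),
};
static const struct rte_flow_action encap_action = {
	.type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
	.conf = &raw_conf,
};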
/* VLAN skipping */
while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
- next_hdr += sizeof(struct rte_vlan_hdr);
vlan = (struct rte_vlan_hdr *)next_hdr;
proto = RTE_BE16(vlan->eth_proto);
+ next_hdr += sizeof(struct rte_vlan_hdr);
}
	/* HW calculates IPv4 csum; no need to proceed. */
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
NULL, "group must be smaller than "
- RTE_STR(MLX5_MAX_FDB_TABLES));
+ RTE_STR(MLX5_MAX_TABLES_FDB));
}
if (!(attributes->egress ^ attributes->ingress))
return rte_flow_error_set(error, ENOTSUP,
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
- error);
+ dev, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
MLX5_FLOW_LAYER_OUTER_VLAN;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
NULL, error);
if (ret < 0)
/* Reset for inner layer. */
next_protocol = 0xff;
}
- mlx5_flow_tunnel_ip_check(items, &last_item);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
NULL, error);
if (ret < 0)
/* Reset for inner layer. */
next_protocol = 0xff;
}
- mlx5_flow_tunnel_ip_check(items, &last_item);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- case RTE_FLOW_ITEM_TYPE_NVGRE:
ret = mlx5_flow_validate_item_gre(items, item_flags,
next_protocol, error);
if (ret < 0)
gre_item = items;
last_item = MLX5_FLOW_LAYER_GRE;
break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ ret = mlx5_flow_validate_item_nvgre(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_NVGRE;
+ break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
ret = mlx5_flow_validate_item_gre_key
(items, item_flags, gre_item, error);
/**
* Add VLAN item to matcher and to the value.
*
+ * @param[in, out] dev_flow
+ * Flow descriptor.
* @param[in, out] matcher
* Flow matcher.
* @param[in, out] key
* Item is inner pattern.
*/
static void
-flow_dv_translate_item_vlan(void *matcher, void *key,
+flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
+ void *matcher, void *key,
const struct rte_flow_item *item,
int inner)
{
const struct rte_flow_item_vlan *vlan_m = item->mask;
const struct rte_flow_item_vlan *vlan_v = item->spec;
- const struct rte_flow_item_vlan nic_mask = {
- .tci = RTE_BE16(0x0fff),
- .inner_type = RTE_BE16(0xffff),
- };
void *headers_m;
void *headers_v;
uint16_t tci_m;
if (!vlan_v)
return;
if (!vlan_m)
- vlan_m = &nic_mask;
+ vlan_m = &rte_flow_item_vlan_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+		/*
+		 * This is a workaround: masks are not supported,
+		 * and have been pre-validated.
+		 */
+ dev_flow->dv.vf_vlan.tag =
+ rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
tci_m = rte_be_to_cpu_16(vlan_m->tci);
tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
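/*
 * Illustrative sketch only, not part of this patch: the kind of VLAN pattern
 * item an application would pass to rte_flow_create(), which
 * flow_dv_translate_item_vlan() above converts into matcher fields. This one
 * matches VLAN ID 100 and ignores PCP/DEI; the names are placeholders.
 */
static const struct rte_flow_item_vlan vlan_spec = {
	.tci = RTE_BE16(100),
};
static const struct rte_flow_item_vlan vlan_mask = {
	.tci = RTE_BE16(0x0fff),
};
static const struct rte_flow_item vlan_item = {
	.type = RTE_FLOW_ITEM_TYPE_VLAN,
	.spec = &vlan_spec,
	.mask = &vlan_mask,
};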
/**
int size;
int i;
- flow_dv_translate_item_gre(matcher, key, item, inner);
+	/* For NVGRE, the GRE header fields must be set to fixed values. */
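+	/*
+	 * In the GRE flag word, 0x2000 is the K (key present) bit; the
+	 * 0xB000 mask below covers the C, K and S bits, so the match
+	 * requires C=0, K=1, S=0 as NVGRE mandates. The protocol is
+	 * Transparent Ethernet Bridging (RTE_ETHER_TYPE_TEB, 0x6558).
+	 */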
+ const struct rte_flow_item_gre gre_spec = {
+ .c_rsvd0_ver = RTE_BE16(0x2000),
+ .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
+ };
+ const struct rte_flow_item_gre gre_mask = {
+ .c_rsvd0_ver = RTE_BE16(0xB000),
+ .protocol = RTE_BE16(UINT16_MAX),
+ };
+ const struct rte_flow_item gre_item = {
+ .spec = &gre_spec,
+ .mask = &gre_mask,
+ .last = NULL,
+ };
+ flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
if (!nvgre_v)
return;
if (!nvgre_m)
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
-#ifdef HAVE_MLX5DV_DR
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
-#endif
return match_criteria_enable;
}
uint32_t modify_action_position = UINT32_MAX;
void *match_mask = matcher.mask.buf;
void *match_value = dev_flow->dv.value.buf;
+ uint8_t next_protocol = 0xff;
flow->group = attr->group;
if (attr->transfer)
MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- flow_dv_translate_item_vlan(match_mask, match_value,
+ flow_dv_translate_item_vlan(dev_flow,
+ match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
MLX5_FLOW_LAYER_OUTER_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
items, tunnel, attr->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
MLX5_IPV4_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- mlx5_flow_tunnel_ip_check(items, &last_item);
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
items, tunnel, attr->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
MLX5_IPV6_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- mlx5_flow_tunnel_ip_check(items, &last_item);
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(match_mask, match_value,
(*flow->queue),
flow->rss.queue_num);
if (!hrxq) {
- int lro = 0;
-
- if (mlx5_lro_on(dev)) {
- if ((dev_flow->layers &
- MLX5_FLOW_LAYER_IPV4_LRO)
- == MLX5_FLOW_LAYER_IPV4_LRO)
- lro = MLX5_FLOW_IPV4_LRO;
- else if ((dev_flow->layers &
- MLX5_FLOW_LAYER_IPV6_LRO)
- == MLX5_FLOW_LAYER_IPV6_LRO)
- lro = MLX5_FLOW_IPV6_LRO;
- }
hrxq = mlx5_hrxq_new
(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
dv->hash_fields, (*flow->queue),
flow->rss.queue_num,
!!(dev_flow->layers &
- MLX5_FLOW_LAYER_TUNNEL), lro);
+ MLX5_FLOW_LAYER_TUNNEL));
}
-
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"hardware refuses to create flow");
goto error;
}
+ if (priv->vmwa_context &&
+ dev_flow->dv.vf_vlan.tag &&
+ !dev_flow->dv.vf_vlan.created) {
+ /*
+			 * The rule contains a VLAN pattern.
+			 * For a VF, create a VLAN interface so that
+			 * the hypervisor sets the correct e-Switch
+			 * vport context.
+ */
+ mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+ }
}
return 0;
error:
mlx5_hrxq_release(dev, dv->hrxq);
dv->hrxq = NULL;
}
+ if (dev_flow->dv.vf_vlan.tag &&
+ dev_flow->dv.vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
mlx5_hrxq_release(dev, dv->hrxq);
dv->hrxq = NULL;
}
+ if (dev_flow->dv.vf_vlan.tag &&
+ dev_flow->dv.vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
}
}