" VXLAN encapsulations");
return -rte_errno;
}
- parser->inner = 1;
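+ /* Flag subsequent specs as matching inner (tunneled) headers. */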
+ parser->inner = IBV_FLOW_SPEC_INNER;
}
if (parser->drop) {
parser->drop_q.offset += cur_item->dst_sz;
cur_item->mask),
parser);
if (ret) {
- rte_flow_error_set(error, ENOTSUP,
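+ /* Forward the positive errno returned by the convert callback. */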
+ rte_flow_error_set(error, ret,
RTE_FLOW_ERROR_TYPE_ITEM,
items, "item not supported");
goto exit_free;
/* Remove unwanted bits from values. */
vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
}
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer. If only this
+ * layer is defined in the Verbs specification, it is interpreted as a
+ * wildcard and all packets will match the rule; if it follows a full
+ * stack of layers (e.g. eth / ipv4 / udp), any packet matching those
+ * outer layers will also match the rule.
+ * To avoid such situations, VNI 0 is currently refused.
+ */
+ if (!vxlan.val.tunnel_id)
+ return EINVAL;
mlx5_flow_create_copy(parser, &vxlan, size);
return 0;
}
parser->rss_conf.rss_key_len,
hash_fields,
parser->queues,
- hash_fields ? parser->queues_n : 1);
+ parser->queues_n);
if (flow->frxq[i].hrxq)
continue;
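+ /* No existing hash Rx queue matched, create a new one. */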
flow->frxq[i].hrxq =
parser->rss_conf.rss_key_len,
hash_fields,
parser->queues,
- hash_fields ? parser->queues_n : 1);
+ parser->queues_n);
if (!flow->frxq[i].hrxq) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
.spec = &attributes->l3,
};
attributes->items[2] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_UDP,
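+ /* The L4 spec filled above describes a TCP header, not UDP. */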
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
.spec = &attributes->l4,
};
break;
priv_fdir_filter_delete(struct priv *priv,
const struct rte_eth_fdir_filter *fdir_filter)
{
- struct mlx5_fdir attributes;
+ struct mlx5_fdir attributes = {
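+ /* Designated initializer zero-fills every other field. */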
+ .attr.group = 0,
+ };
struct mlx5_flow_parse parser = {
.create = 1,
.layer = HASH_RXQ_ETH,
attributes.actions, &error, &parser);
if (ret)
goto exit;
+ /*
+ * Special case for the drop action: the drop specification is only
+ * added to the Verbs attributes when the flow is created, so after
+ * conversion it is missing here and must be appended manually before
+ * comparing flows.
+ */
+ if (parser.drop) {
+ struct ibv_flow_spec_action_drop *drop;
+
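+ /* The drop spec lands right after the specs already converted. */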
+ drop = (void *)((uintptr_t)parser.drop_q.ibv_attr +
+ parser.drop_q.offset);
+ *drop = (struct ibv_flow_spec_action_drop){
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = sizeof(struct ibv_flow_spec_action_drop),
+ };
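+ /* Account for the spec appended above. */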
+ parser.drop_q.ibv_attr->num_of_specs++;
+ }
TAILQ_FOREACH(flow, &priv->flows, next) {
struct ibv_flow_attr *attr;
struct ibv_spec_header *attr_h;
flow_h = flow_spec;
if (memcmp(spec, flow_spec,
RTE_MIN(attr_h->size, flow_h->size)))
- continue;
+ goto wrong_flow;
spec = (void *)((uintptr_t)attr + attr_h->size);
flow_spec = (void *)((uintptr_t)flow_attr +
flow_h->size);
}
/* At this point, the flow matches. */
break;
+wrong_flow:
+ /* The flow does not match. */
+ continue;
}
if (flow)
priv_flow_destroy(priv, &priv->flows, flow);