#include "mlx5.h"
#include "mlx5_prm.h"
+/* Define minimal priority for control plane flows. */
+#define MLX5_CTRL_FLOW_PRIORITY 4
+
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
const void *default_mask,
void *data);
-struct rte_flow {
- LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
- struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+/** Structure for Drop queue. */
+struct mlx5_hrxq_drop {
+ struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
- struct ibv_exp_wq *wq; /**< Verbs work queue. */
+ struct ibv_wq *wq; /**< Verbs work queue. */
struct ibv_cq *cq; /**< Verbs completion queue. */
- struct rxq *rxq; /**< Pointer to the queue, NULL if drop queue. */
+};
+
+/* Flow structure. */
+struct mlx5_flow {
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queue. */
+};
+
+/* Drop flow structure. */
+struct mlx5_flow_drop {
+ struct mlx5_hrxq_drop hrxq; /**< Drop hash Rx queue. */
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
uint32_t mark:1; /**< Set if the flow is marked. */
+ uint32_t drop:1; /**< Drop queue. */
+ struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_flow *ibv_flow; /**< Verbs flow. */
+ uint16_t queues_n; /**< Number of entries in queues[]. */
+ uint16_t (*queues)[]; /**< Queue indexes to use. */
+ union {
+ struct mlx5_flow frxq; /**< Flow with Rx queue. */
+ struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
+ };
};
/** Static initializer for items. */
RTE_FLOW_ACTION_TYPE_DROP,
RTE_FLOW_ACTION_TYPE_QUEUE,
RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
RTE_FLOW_ACTION_TYPE_END,
};
.mask = &(const struct rte_flow_item_eth){
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = -1,
},
+ .default_mask = &rte_flow_item_eth_mask,
.mask_sz = sizeof(struct rte_flow_item_eth),
.convert = mlx5_flow_create_eth,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
+ .dst_sz = sizeof(struct ibv_flow_spec_eth),
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
.mask = &(const struct rte_flow_item_vlan){
.tci = -1,
},
+ .default_mask = &rte_flow_item_vlan_mask,
.mask_sz = sizeof(struct rte_flow_item_vlan),
.convert = mlx5_flow_create_vlan,
.dst_sz = 0,
.next_proto_id = -1,
},
},
- .default_mask = &(const struct rte_flow_item_ipv4){
- .hdr = {
- .src_addr = -1,
- .dst_addr = -1,
- },
- },
+ .default_mask = &rte_flow_item_ipv4_mask,
.mask_sz = sizeof(struct rte_flow_item_ipv4),
.convert = mlx5_flow_create_ipv4,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext),
+ .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
},
[RTE_FLOW_ITEM_TYPE_IPV6] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
},
+ .vtc_flow = -1,
+ .proto = -1,
+ .hop_limits = -1,
},
},
+ .default_mask = &rte_flow_item_ipv6_mask,
.mask_sz = sizeof(struct rte_flow_item_ipv6),
.convert = mlx5_flow_create_ipv6,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6),
+ .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
.dst_port = -1,
},
},
+ .default_mask = &rte_flow_item_udp_mask,
.mask_sz = sizeof(struct rte_flow_item_udp),
.convert = mlx5_flow_create_udp,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.actions = valid_actions,
.dst_port = -1,
},
},
+ .default_mask = &rte_flow_item_tcp_mask,
.mask_sz = sizeof(struct rte_flow_item_tcp),
.convert = mlx5_flow_create_tcp,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
[RTE_FLOW_ITEM_TYPE_VXLAN] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
.mask = &(const struct rte_flow_item_vxlan){
.vni = "\xff\xff\xff",
},
+ .default_mask = &rte_flow_item_vxlan_mask,
.mask_sz = sizeof(struct rte_flow_item_vxlan),
.convert = mlx5_flow_create_vxlan,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel),
+ .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
},
};
-/** Structure to pass to the conversion function. */
-struct mlx5_flow {
- struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
- unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
- uint32_t inner; /**< Set once VXLAN is encountered. */
-};
-
+/* Actions parsed out of a flow rule. */
struct mlx5_flow_action {
uint32_t queue:1; /**< Target is a receive queue. */
uint32_t drop:1; /**< Target is a drop queue. */
uint32_t mark:1; /**< Mark is present in the flow. */
- uint32_t queue_id; /**< Identifier of the queue. */
uint32_t mark_id; /**< Mark identifier. */
+ uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queue indexes to use. */
+ uint16_t queues_n; /**< Number of entries in queues[]. */
};
+/** Structure to pass to the conversion function. */
+struct mlx5_flow_parse {
+ struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
+ unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+ uint32_t inner; /**< Set once VXLAN is encountered. */
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+ struct mlx5_flow_action actions; /**< Parsed action result. */
+};
+
+static const struct rte_flow_ops mlx5_flow_ops = {
+ .validate = mlx5_flow_validate,
+ .create = mlx5_flow_create,
+ .destroy = mlx5_flow_destroy,
+ .flush = mlx5_flow_flush,
+ .query = NULL,
+ .isolate = mlx5_flow_isolate,
+};
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ if (filter_type == RTE_ETH_FILTER_GENERIC) {
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mlx5_flow_ops;
+ return 0;
+ }
+ ERROR("%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ return -EINVAL;
+}
+
/**
* Check support for a given item.
*
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error,
- struct mlx5_flow *flow)
+ struct mlx5_flow_parse *flow)
{
const struct mlx5_flow_items *cur_item = mlx5_flow_items;
- struct mlx5_flow_action action = {
- .queue = 0,
- .drop = 0,
- .mark = 0,
- };
(void)priv;
if (attr->group) {
"groups are not supported");
return -rte_errno;
}
- if (attr->priority) {
+ if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
NULL,
if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
continue;
- /* Handle special situation for VLAN. */
- if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- if (((const struct rte_flow_item_vlan *)items)->tci >
- ETHER_MAX_VLAN_ID) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "wrong VLAN id value");
- return -rte_errno;
- }
- }
for (i = 0;
cur_item->items &&
cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
cur_item = token;
err = mlx5_flow_item_validate(items,
(const uint8_t *)cur_item->mask,
- sizeof(cur_item->mask_sz));
+ cur_item->mask_sz);
if (err)
goto exit_item_not_supported;
if (flow->ibv_attr && cur_item->convert) {
flow);
if (err)
goto exit_item_not_supported;
+ } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ if (flow->inner) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "cannot recognize multiple"
+ " VXLAN encapsulations");
+ return -rte_errno;
+ }
+ flow->inner = 1;
}
flow->offset += cur_item->dst_sz;
}
if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
continue;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- action.drop = 1;
+ flow->actions.drop = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
actions->conf;
+ uint16_t n;
+ uint16_t found = 0;
if (!queue || (queue->index > (priv->rxqs_n - 1)))
goto exit_action_not_supported;
- action.queue = 1;
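+ /*
+ * A QUEUE action combined with RSS is only valid when its
+ * index is one of the RSS queues.
+ */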
+ for (n = 0; n < flow->actions.queues_n; ++n) {
+ if (flow->actions.queues[n] == queue->index) {
+ found = 1;
+ break;
+ }
+ }
+ if (flow->actions.queues_n > 1 && !found) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "queue action not in RSS queues");
+ return -rte_errno;
+ }
+ if (!found) {
+ flow->actions.queue = 1;
+ flow->actions.queues_n = 1;
+ flow->actions.queues[0] = queue->index;
+ }
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ const struct rte_flow_action_rss *rss =
+ (const struct rte_flow_action_rss *)
+ actions->conf;
+ uint16_t n;
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no valid queues");
+ return -rte_errno;
+ }
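+ /* An earlier QUEUE action must target one of the RSS queues. */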
+ if (flow->actions.queues_n == 1) {
+ uint16_t found = 0;
+
+ assert(flow->actions.queues_n);
+ for (n = 0; n < rss->num; ++n) {
+ if (flow->actions.queues[0] ==
+ rss->queue[n]) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "queue action not in RSS"
+ " queues");
+ return -rte_errno;
+ }
+ }
+ for (n = 0; n < rss->num; ++n) {
+ if (rss->queue[n] >= priv->rxqs_n) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "queue id > number of"
+ " queues");
+ return -rte_errno;
+ }
+ }
+ flow->actions.queue = 1;
+ for (n = 0; n < rss->num; ++n)
+ flow->actions.queues[n] = rss->queue[n];
+ flow->actions.queues_n = rss->num;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
actions->conf;
- if (mark && (mark->id >= MLX5_FLOW_MARK_MAX)) {
+ if (!mark) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "mark must be defined");
+ return -rte_errno;
+ } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
" and 16777199");
return -rte_errno;
}
- action.mark = 1;
+ flow->actions.mark = 1;
+ flow->actions.mark_id = mark->id;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
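+ /* FLAG behaves as MARK with the default mark identifier. */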
+ flow->actions.mark = 1;
} else {
goto exit_action_not_supported;
}
}
- if (action.mark && !flow->ibv_attr)
- flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
- if (!action.queue && !action.drop) {
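+ /*
+ * During the sizing pass (ibv_attr == NULL), reserve room for
+ * the action specification as well.
+ */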
+ if (flow->actions.mark && !flow->ibv_attr && !flow->actions.drop)
+ flow->offset += sizeof(struct ibv_flow_spec_action_tag);
+ if (!flow->ibv_attr && flow->actions.drop)
+ flow->offset += sizeof(struct ibv_flow_spec_action_drop);
+ if (!flow->actions.queue && !flow->actions.drop) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "no valid action");
return -rte_errno;
{
struct priv *priv = dev->data->dev_private;
int ret;
- struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };
+ struct mlx5_flow_parse flow = {
+ .offset = sizeof(struct ibv_flow_attr),
+ .actions = {
+ .mark_id = MLX5_FLOW_MARK_DEFAULT,
+ .queues_n = 0,
+ },
+ };
priv_lock(priv);
ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
{
const struct rte_flow_item_eth *spec = item->spec;
const struct rte_flow_item_eth *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+ struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
unsigned int i;
++flow->ibv_attr->num_of_specs;
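+ /*
+ * Lower priority values are matched first: L2 specs use 2 so
+ * that more specific L3 (1) and L4 (0) rules take precedence.
+ */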
flow->ibv_attr->priority = 2;
+ flow->hash_fields = 0;
eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *eth = (struct ibv_exp_flow_spec_eth) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
+ *eth = (struct ibv_flow_spec_eth) {
+ .type = flow->inner | IBV_FLOW_SPEC_ETH,
.size = eth_size,
};
if (!spec)
mask = default_mask;
memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ eth->val.ether_type = spec->type;
memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ eth->mask.ether_type = mask->type;
/* Remove unwanted bits from values. */
for (i = 0; i < ETHER_ADDR_LEN; ++i) {
eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
eth->val.src_mac[i] &= eth->mask.src_mac[i];
}
+ eth->val.ether_type &= eth->mask.ether_type;
return 0;
}
{
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+ struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
if (!spec)
{
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_ipv4_ext *ipv4;
- unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);
+ struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
+ struct ibv_flow_spec_ipv4_ext *ipv4;
+ unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
++flow->ibv_attr->num_of_specs;
flow->ibv_attr->priority = 1;
+ flow->hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+ IBV_RX_HASH_DST_IPV4);
ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
+ *ipv4 = (struct ibv_flow_spec_ipv4_ext) {
+ .type = flow->inner | IBV_FLOW_SPEC_IPV4_EXT,
.size = ipv4_size,
};
if (!spec)
return 0;
if (!mask)
mask = default_mask;
- ipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){
+ ipv4->val = (struct ibv_flow_ipv4_ext_filter){
.src_ip = spec->hdr.src_addr,
.dst_ip = spec->hdr.dst_addr,
.proto = spec->hdr.next_proto_id,
.tos = spec->hdr.type_of_service,
};
- ipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){
+ ipv4->mask = (struct ibv_flow_ipv4_ext_filter){
.src_ip = mask->hdr.src_addr,
.dst_ip = mask->hdr.dst_addr,
.proto = mask->hdr.next_proto_id,
{
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_ipv6 *ipv6;
- unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6);
+ struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
+ struct ibv_flow_spec_ipv6 *ipv6;
+ unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
unsigned int i;
++flow->ibv_attr->num_of_specs;
flow->ibv_attr->priority = 1;
+ flow->hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+ IBV_RX_HASH_DST_IPV6);
ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *ipv6 = (struct ibv_exp_flow_spec_ipv6) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6,
+ *ipv6 = (struct ibv_flow_spec_ipv6) {
+ .type = flow->inner | IBV_FLOW_SPEC_IPV6,
.size = ipv6_size,
};
if (!spec)
RTE_DIM(ipv6->mask.src_ip));
memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
RTE_DIM(ipv6->mask.dst_ip));
+ ipv6->mask.flow_label = mask->hdr.vtc_flow;
+ ipv6->mask.next_hdr = mask->hdr.proto;
+ ipv6->mask.hop_limit = mask->hdr.hop_limits;
/* Remove unwanted bits from values. */
for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
}
+ ipv6->val.flow_label &= ipv6->mask.flow_label;
+ ipv6->val.next_hdr &= ipv6->mask.next_hdr;
+ ipv6->val.hop_limit &= ipv6->mask.hop_limit;
return 0;
}
{
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_tcp_udp *udp;
- unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+ struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
+ struct ibv_flow_spec_tcp_udp *udp;
+ unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
++flow->ibv_attr->num_of_specs;
flow->ibv_attr->priority = 0;
+ flow->hash_fields |= (IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP);
udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *udp = (struct ibv_exp_flow_spec_tcp_udp) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
+ *udp = (struct ibv_flow_spec_tcp_udp) {
+ .type = flow->inner | IBV_FLOW_SPEC_UDP,
.size = udp_size,
};
if (!spec)
{
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_tcp_udp *tcp;
- unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+ struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
+ struct ibv_flow_spec_tcp_udp *tcp;
+ unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
++flow->ibv_attr->num_of_specs;
flow->ibv_attr->priority = 0;
+ flow->hash_fields |= (IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP);
tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *tcp = (struct ibv_exp_flow_spec_tcp_udp) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
+ *tcp = (struct ibv_flow_spec_tcp_udp) {
+ .type = flow->inner | IBV_FLOW_SPEC_TCP,
.size = tcp_size,
};
if (!spec)
{
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_tunnel *vxlan;
- unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);
+ struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
+ struct ibv_flow_spec_tunnel *vxlan;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
union vni {
uint32_t vlan_id;
uint8_t vni[4];
flow->ibv_attr->priority = 0;
id.vni[0] = 0;
vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *vxlan = (struct ibv_exp_flow_spec_tunnel) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,
+ *vxlan = (struct ibv_flow_spec_tunnel) {
+ .type = flow->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
.size = size,
};
- flow->inner = IBV_EXP_FLOW_SPEC_INNER;
+ flow->inner = IBV_FLOW_SPEC_INNER;
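+ /* Subsequent pattern items match the inner (encapsulated) packet. */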
if (!spec)
return 0;
if (!mask)
* Mark identifier.
*/
static int
-mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
+mlx5_flow_create_flag_mark(struct mlx5_flow_parse *flow, uint32_t mark_id)
{
- struct ibv_exp_flow_spec_action_tag *tag;
- unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag *tag;
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *tag = (struct ibv_exp_flow_spec_action_tag){
- .type = IBV_EXP_FLOW_SPEC_ACTION_TAG,
+ *tag = (struct ibv_flow_spec_action_tag){
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
.size = size,
.tag_id = mlx5_flow_mark_set(mark_id),
};
return 0;
}
+/**
+ * Complete flow rule creation with a drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param flow
+ * MLX5 flow parser structure (filled by priv_flow_validate()).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow if the rule could be created, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue_drop(struct priv *priv,
+ struct mlx5_flow_parse *flow,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rte_flow;
+ struct ibv_flow_spec_action_drop *drop;
+ unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+
+ assert(priv->pd);
+ assert(priv->ctx);
+ rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+ if (!rte_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+ rte_flow->drop = 1;
+ drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *drop = (struct ibv_flow_spec_action_drop){
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = size,
+ };
+ ++flow->ibv_attr->num_of_specs;
+ flow->offset += sizeof(struct ibv_flow_spec_action_drop);
+ rte_flow->ibv_attr = flow->ibv_attr;
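+ /* Attach the Verbs flow only once the port is started. */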
+ if (!priv->dev->data->dev_started)
+ return rte_flow;
+ rte_flow->drxq.hrxq.qp = priv->flow_drop_queue->qp;
+ rte_flow->ibv_flow = ibv_create_flow(rte_flow->drxq.hrxq.qp,
+ rte_flow->ibv_attr);
+ if (!rte_flow->ibv_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "flow rule creation failure");
+ goto error;
+ }
+ return rte_flow;
+error:
+ assert(rte_flow);
+ rte_free(rte_flow);
+ return NULL;
+}
+
/**
* Complete flow rule creation.
*
* @param priv
* Pointer to private structure.
- * @param ibv_attr
- * Verbs flow attributes.
- * @param action
- * Target action structure.
+ * @param flow
+ * MLX5 flow parser structure (filled by priv_flow_validate()).
* @param[out] error
* Perform verbose error reporting if not NULL.
*
*/
static struct rte_flow *
priv_flow_create_action_queue(struct priv *priv,
- struct ibv_exp_flow_attr *ibv_attr,
- struct mlx5_flow_action *action,
+ struct mlx5_flow_parse *flow,
struct rte_flow_error *error)
{
- struct rxq_ctrl *rxq;
struct rte_flow *rte_flow;
+ unsigned int i;
assert(priv->pd);
assert(priv->ctx);
- rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+ assert(!flow->actions.drop);
+ rte_flow =
+ rte_calloc(__func__, 1,
+ sizeof(*rte_flow) +
+ flow->actions.queues_n * sizeof(uint16_t),
+ 0);
if (!rte_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "cannot allocate flow memory");
return NULL;
}
- if (action->drop) {
- rte_flow->cq =
- ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
- &(struct ibv_exp_cq_init_attr){
- .comp_mask = 0,
- });
- if (!rte_flow->cq) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate CQ");
- goto error;
- }
- rte_flow->wq = ibv_exp_create_wq(priv->ctx,
- &(struct ibv_exp_wq_init_attr){
- .wq_type = IBV_EXP_WQT_RQ,
- .max_recv_wr = 1,
- .max_recv_sge = 1,
- .pd = priv->pd,
- .cq = rte_flow->cq,
- });
- } else {
- rxq = container_of((*priv->rxqs)[action->queue_id],
- struct rxq_ctrl, rxq);
- rte_flow->rxq = &rxq->rxq;
- rxq->rxq.mark |= action->mark;
- rte_flow->wq = rxq->wq;
- }
- rte_flow->mark = action->mark;
- rte_flow->ibv_attr = ibv_attr;
- rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
- priv->ctx,
- &(struct ibv_exp_rwq_ind_table_init_attr){
- .pd = priv->pd,
- .log_ind_tbl_size = 0,
- .ind_tbl = &rte_flow->wq,
- .comp_mask = 0,
- });
- if (!rte_flow->ind_table) {
+ rte_flow->mark = flow->actions.mark;
+ rte_flow->ibv_attr = flow->ibv_attr;
+ rte_flow->queues = (uint16_t (*)[])(rte_flow + 1);
+ memcpy(rte_flow->queues, flow->actions.queues,
+ flow->actions.queues_n * sizeof(uint16_t));
+ rte_flow->queues_n = flow->actions.queues_n;
+ rte_flow->frxq.hash_fields = flow->hash_fields;
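+ /*
+ * An existing hash Rx queue with the same parameters means the
+ * same flow rule already exists: reject it as a duplicate.
+ */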
+ rte_flow->frxq.hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
+ rss_hash_default_key_len,
+ flow->hash_fields,
+ (*rte_flow->queues),
+ rte_flow->queues_n);
+ if (rte_flow->frxq.hrxq) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate indirection table");
+ NULL, "duplicated flow");
goto error;
}
- rte_flow->qp = ibv_exp_create_qp(
- priv->ctx,
- &(struct ibv_exp_qp_init_attr){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_PORT |
- IBV_EXP_QP_INIT_ATTR_RX_HASH,
- .pd = priv->pd,
- .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
- .rx_hash_function =
- IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_hash_default_key_len,
- .rx_hash_key = rss_hash_default_key,
- .rx_hash_fields_mask = 0,
- .rwq_ind_tbl = rte_flow->ind_table,
- },
- .port_num = priv->port,
- });
- if (!rte_flow->qp) {
+ rte_flow->frxq.hrxq = mlx5_priv_hrxq_new(priv, rss_hash_default_key,
+ rss_hash_default_key_len,
+ flow->hash_fields,
+ (*rte_flow->queues),
+ rte_flow->queues_n);
+ if (!rte_flow->frxq.hrxq) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate QP");
+ NULL, "cannot create hash rxq");
goto error;
}
- rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
- rte_flow->ibv_attr);
+ for (i = 0; i != flow->actions.queues_n; ++i) {
+ struct mlx5_rxq_data *q =
+ (*priv->rxqs)[flow->actions.queues[i]];
+
+ q->mark |= flow->actions.mark;
+ }
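+ /* Attach the Verbs flow only once the port is started. */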
+ if (!priv->dev->data->dev_started)
+ return rte_flow;
+ rte_flow->ibv_flow = ibv_create_flow(rte_flow->frxq.hrxq->qp,
+ rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
return rte_flow;
error:
assert(rte_flow);
- if (rte_flow->qp)
- ibv_destroy_qp(rte_flow->qp);
- if (rte_flow->ind_table)
- ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
- if (!rte_flow->rxq && rte_flow->wq)
- ibv_exp_destroy_wq(rte_flow->wq);
- if (!rte_flow->rxq && rte_flow->cq)
- ibv_destroy_cq(rte_flow->cq);
- rte_free(rte_flow->ibv_attr);
+ if (rte_flow->frxq.hrxq)
+ mlx5_priv_hrxq_release(priv, rte_flow->frxq.hrxq);
rte_free(rte_flow);
return NULL;
}
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
* @param[in] attr
* Flow rule attributes.
* @param[in] pattern
*/
static struct rte_flow *
priv_flow_create(struct priv *priv,
+ struct mlx5_flows *list,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct rte_flow *rte_flow;
- struct mlx5_flow_action action;
- struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
+ struct mlx5_flow_parse flow = {
+ .offset = sizeof(struct ibv_flow_attr),
+ .actions = {
+ .mark_id = MLX5_FLOW_MARK_DEFAULT,
+ .queues = { 0 },
+ .queues_n = 0,
+ },
+ };
int err;
err = priv_flow_validate(priv, attr, items, actions, error, &flow);
if (err)
goto exit;
flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
- flow.offset = sizeof(struct ibv_exp_flow_attr);
+ flow.offset = sizeof(struct ibv_flow_attr);
if (!flow.ibv_attr) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "cannot allocate ibv_attr memory");
goto exit;
}
- *flow.ibv_attr = (struct ibv_exp_flow_attr){
- .type = IBV_EXP_FLOW_ATTR_NORMAL,
- .size = sizeof(struct ibv_exp_flow_attr),
+ *flow.ibv_attr = (struct ibv_flow_attr){
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .size = sizeof(struct ibv_flow_attr),
.priority = attr->priority,
.num_of_specs = 0,
.port = 0,
.flags = 0,
- .reserved = 0,
};
flow.inner = 0;
+ flow.hash_fields = 0;
claim_zero(priv_flow_validate(priv, attr, items, actions,
error, &flow));
- action = (struct mlx5_flow_action){
- .queue = 0,
- .drop = 0,
- .mark = 0,
- .mark_id = MLX5_FLOW_MARK_DEFAULT,
- };
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
- continue;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- action.queue = 1;
- action.queue_id =
- ((const struct rte_flow_action_queue *)
- actions->conf)->index;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- action.drop = 1;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
- const struct rte_flow_action_mark *mark =
- (const struct rte_flow_action_mark *)
- actions->conf;
-
- if (mark)
- action.mark_id = mark->id;
- action.mark = 1;
- } else {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "unsupported action");
- goto exit;
- }
+ if (flow.actions.mark && !flow.actions.drop) {
+ mlx5_flow_create_flag_mark(&flow, flow.actions.mark_id);
+ flow.offset += sizeof(struct ibv_flow_spec_action_tag);
}
- if (action.mark) {
- mlx5_flow_create_flag_mark(&flow, action.mark_id);
- flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
+ if (flow.actions.drop)
+ rte_flow =
+ priv_flow_create_action_queue_drop(priv, &flow, error);
+ else
+ rte_flow = priv_flow_create_action_queue(priv, &flow, error);
+ if (!rte_flow)
+ goto exit;
+ TAILQ_INSERT_TAIL(list, rte_flow, next);
+ DEBUG("Flow created %p", (void *)rte_flow);
- rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
- &action, error);
return rte_flow;
exit:
rte_free(flow.ibv_attr);
struct rte_flow *flow;
priv_lock(priv);
- flow = priv_flow_create(priv, attr, items, actions, error);
- if (flow) {
- LIST_INSERT_HEAD(&priv->flows, flow, next);
- DEBUG("Flow created %p", (void *)flow);
- }
+ flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
+ error);
priv_unlock(priv);
return flow;
}
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
* @param[in] flow
* Flow to destroy.
*/
static void
priv_flow_destroy(struct priv *priv,
+ struct mlx5_flows *list,
struct rte_flow *flow)
{
- (void)priv;
- LIST_REMOVE(flow, next);
- if (flow->ibv_flow)
- claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
- if (flow->qp)
- claim_zero(ibv_destroy_qp(flow->qp));
- if (flow->ind_table)
- claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
- if (!flow->rxq && flow->wq)
- claim_zero(ibv_exp_destroy_wq(flow->wq));
- if (!flow->rxq && flow->cq)
- claim_zero(ibv_destroy_cq(flow->cq));
- if (flow->mark) {
+ unsigned int i;
+ uint16_t *queues;
+ uint16_t queues_n;
+
+ if (flow->drop || !flow->mark)
+ goto free;
+ queues = flow->frxq.hrxq->ind_table->queues;
+ queues_n = flow->frxq.hrxq->ind_table->queues_n;
+ for (i = 0; i != queues_n; ++i) {
struct rte_flow *tmp;
- uint32_t mark_n = 0;
-
- for (tmp = LIST_FIRST(&priv->flows);
- tmp;
- tmp = LIST_NEXT(tmp, next)) {
- if ((flow->rxq == tmp->rxq) && tmp->mark)
- ++mark_n;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[i]];
+ int mark = 0;
+
+ /*
+ * To remove the mark from the queue, the queue must not be
+ * present in any other marked flow (RSS or not).
+ */
+ TAILQ_FOREACH(tmp, list, next) {
+ unsigned int j;
+
+ if (!tmp->mark)
+ continue;
+ for (j = 0;
+ (j != tmp->frxq.hrxq->ind_table->queues_n) &&
+ !mark;
+ j++)
+ if (tmp->frxq.hrxq->ind_table->queues[j] ==
+ queues[i])
+ mark = 1;
}
- flow->rxq->mark = !!mark_n;
+ rxq_data->mark = mark;
}
+free:
+ if (flow->ibv_flow)
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ if (!flow->drop)
+ mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+ TAILQ_REMOVE(list, flow, next);
rte_free(flow->ibv_attr);
DEBUG("Flow destroyed %p", (void *)flow);
rte_free(flow);
(void)error;
priv_lock(priv);
- priv_flow_destroy(priv, flow);
+ priv_flow_destroy(priv, &priv->flows, flow);
priv_unlock(priv);
return 0;
}
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
-static void
-priv_flow_flush(struct priv *priv)
+void
+priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
{
- while (!LIST_EMPTY(&priv->flows)) {
+ while (!TAILQ_EMPTY(list)) {
struct rte_flow *flow;
- flow = LIST_FIRST(&priv->flows);
- priv_flow_destroy(priv, flow);
+ flow = TAILQ_FIRST(list);
+ priv_flow_destroy(priv, list, flow);
}
}
(void)error;
priv_lock(priv);
- priv_flow_flush(priv);
+ priv_flow_flush(priv, &priv->flows);
priv_unlock(priv);
return 0;
}
+/**
+ * Create drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+priv_flow_create_drop_queue(struct priv *priv)
+{
+ struct mlx5_hrxq_drop *fdq = NULL;
+
+ assert(priv->pd);
+ assert(priv->ctx);
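+ /*
+ * ibv_create_flow() requires a QP to attach rules to, even for
+ * a drop action: build a minimal Rx queue chain that is never
+ * polled.
+ */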
+ fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
+ if (!fdq) {
+ WARN("cannot allocate memory for drop queue");
+ goto error;
+ }
+ fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+ if (!fdq->cq) {
+ WARN("cannot allocate CQ for drop queue");
+ goto error;
+ }
+ fdq->wq = ibv_create_wq(priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = priv->pd,
+ .cq = fdq->cq,
+ });
+ if (!fdq->wq) {
+ WARN("cannot allocate WQ for drop queue");
+ goto error;
+ }
+ fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = 0,
+ .ind_tbl = &fdq->wq,
+ .comp_mask = 0,
+ });
+ if (!fdq->ind_table) {
+ WARN("cannot allocate indirection table for drop queue");
+ goto error;
+ }
+ fdq->qp = ibv_create_qp_ex(priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function =
+ IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_hash_default_key_len,
+ .rx_hash_key = rss_hash_default_key,
+ .rx_hash_fields_mask = 0,
+ },
+ .rwq_ind_tbl = fdq->ind_table,
+ .pd = priv->pd
+ });
+ if (!fdq->qp) {
+ WARN("cannot allocate QP for drop queue");
+ goto error;
+ }
+ priv->flow_drop_queue = fdq;
+ return 0;
+error:
+ if (fdq) {
+ if (fdq->qp)
+ claim_zero(ibv_destroy_qp(fdq->qp));
+ if (fdq->ind_table)
+ claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ if (fdq->wq)
+ claim_zero(ibv_destroy_wq(fdq->wq));
+ if (fdq->cq)
+ claim_zero(ibv_destroy_cq(fdq->cq));
+ rte_free(fdq);
+ }
+ priv->flow_drop_queue = NULL;
+ return -1;
+}
+
+/**
+ * Delete drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_flow_delete_drop_queue(struct priv *priv)
+{
+ struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
+
+ if (!fdq)
+ return;
+ if (fdq->qp)
+ claim_zero(ibv_destroy_qp(fdq->qp));
+ if (fdq->ind_table)
+ claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ if (fdq->wq)
+ claim_zero(ibv_destroy_wq(fdq->wq));
+ if (fdq->cq)
+ claim_zero(ibv_destroy_cq(fdq->cq));
+ rte_free(fdq);
+ priv->flow_drop_queue = NULL;
+}
+
/**
* Remove all flows.
*
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
void
-priv_flow_stop(struct priv *priv)
+priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
{
struct rte_flow *flow;
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
- claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
flow->ibv_flow = NULL;
- if (flow->mark)
- flow->rxq->mark = 0;
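+ /* Clear the mark flags while the hash Rx queue is still valid. */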
+ if (flow->mark) {
+ unsigned int n;
+ struct mlx5_ind_table_ibv *ind_tbl =
+ flow->frxq.hrxq->ind_table;
+
+ for (n = 0; n < ind_tbl->queues_n; ++n)
+ (*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
+ }
+ mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+ flow->frxq.hrxq = NULL;
DEBUG("Flow %p removed", (void *)flow);
}
+ priv_flow_delete_drop_queue(priv);
}
/**
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*
* @return
* 0 on success, an errno value otherwise and rte_errno is set.
*/
int
-priv_flow_start(struct priv *priv)
+priv_flow_start(struct priv *priv, struct mlx5_flows *list)
{
+ int ret;
struct rte_flow *flow;
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
- flow->ibv_flow = ibv_exp_create_flow(flow->qp,
- flow->ibv_attr);
+ ret = priv_flow_create_drop_queue(priv);
+ if (ret)
+ return -1;
+ TAILQ_FOREACH(flow, list, next) {
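+ /* Reuse an existing hash Rx queue when possible. */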
+ if (flow->frxq.hrxq)
+ goto flow_create;
+ flow->frxq.hrxq =
+ mlx5_priv_hrxq_get(priv, rss_hash_default_key,
+ rss_hash_default_key_len,
+ flow->frxq.hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (flow->frxq.hrxq)
+ goto flow_create;
+ flow->frxq.hrxq =
+ mlx5_priv_hrxq_new(priv, rss_hash_default_key,
+ rss_hash_default_key_len,
+ flow->frxq.hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (!flow->frxq.hrxq) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+flow_create:
+ flow->ibv_flow = ibv_create_flow(flow->frxq.hrxq->qp,
+ flow->ibv_attr);
if (!flow->ibv_flow) {
DEBUG("Flow %p cannot be applied", (void *)flow);
rte_errno = EINVAL;
return rte_errno;
}
DEBUG("Flow %p applied", (void *)flow);
- if (flow->rxq)
- flow->rxq->mark |= flow->mark;
+ if (flow->mark) {
+ unsigned int n;
+
+ for (n = 0;
+ n < flow->frxq.hrxq->ind_table->queues_n;
+ ++n) {
+ uint16_t idx =
+ flow->frxq.hrxq->ind_table->queues[n];
+ (*priv->rxqs)[idx]->mark = 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Isolated mode.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ priv_lock(priv);
+ if (dev->data->dev_started) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port must be stopped first");
+ priv_unlock(priv);
+ return -rte_errno;
+ }
+ priv->isolated = !!enable;
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Verify the flow list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * The number of flows not released.
+ */
+int
+priv_flow_verify(struct priv *priv)
+{
+ struct rte_flow *flow;
+ int ret = 0;
+
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ DEBUG("%p: flow %p still referenced", (void *)priv,
+ (void *)flow);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Enable/disable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param spec
+ * An Ethernet flow spec to apply.
+ * @param mask
+ * An Ethernet flow mask to apply.
+ * @param enable
+ * Enable/disable the flow.
+ *
+ * @return
+ * 0 on success, nonzero on failure.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *spec,
+ struct rte_flow_item_eth *mask,
+ unsigned int enable)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ .priority = MLX5_CTRL_FLOW_PRIORITY,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = spec,
+ .last = NULL,
+ .mask = mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &(struct rte_flow_action_queue){
+ .index = 0,
+ },
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+
+ if (enable) {
+ flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items,
+ actions, &error);
+ if (!flow)
+ return 1;
+ } else {
+ struct spec {
+ struct ibv_flow_attr ibv_attr;
+ struct ibv_flow_spec_eth eth;
+ } spec;
+ struct mlx5_flow_parse parser = {
+ .ibv_attr = &spec.ibv_attr,
+ .offset = sizeof(struct ibv_flow_attr),
+ };
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int attr_size = sizeof(struct ibv_flow_attr);
+
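+ /*
+ * Rebuild the Verbs specification generated by the enable path
+ * and search the control flow list for a matching rule.
+ */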
+ claim_zero(mlx5_flow_create_eth(&items[0], NULL, &parser));
+ TAILQ_FOREACH(flow, &priv->ctrl_flows, next) {
+ eth = (void *)((uintptr_t)flow->ibv_attr + attr_size);
+ assert(eth->type == IBV_FLOW_SPEC_ETH);
+ if (!memcmp(eth, &spec.eth, sizeof(*eth)))
+ break;
+ }
+ if (flow) {
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+ rte_free(flow->ibv_attr);
+ DEBUG("Control flow destroyed %p", (void *)flow);
+ TAILQ_REMOVE(&priv->ctrl_flows, flow, next);
+ rte_free(flow);
+ }
}
return 0;
}