#include "mlx5.h"
#include "mlx5_prm.h"
+/* Define minimal priority for control plane flows. */
+#define MLX5_CTRL_FLOW_PRIORITY 4
+
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
const void *default_mask,
void *data);
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+/** Structure for the drop queue. */
+struct mlx5_hrxq_drop {
struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_flow *ibv_flow; /**< Verbs flow. */
struct ibv_wq *wq; /**< Verbs work queue. */
struct ibv_cq *cq; /**< Verbs completion queue. */
- uint16_t rxqs_n; /**< Number of queues in this flow, 0 if drop queue. */
+};
+
+/* Flow structure. */
+struct mlx5_flow {
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+};
+
+/* Drop flow structure. */
+struct mlx5_flow_drop {
+ struct mlx5_hrxq_drop hrxq; /**< Drop hash Rx queue. */
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
uint32_t mark:1; /**< Set if the flow is marked. */
uint32_t drop:1; /**< Drop queue. */
- uint64_t hash_fields; /**< Fields that participate in the hash. */
- struct rxq *rxqs[]; /**< Pointer to the queues array. */
+ struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_flow *ibv_flow; /**< Verbs flow. */
+ uint16_t queues_n; /**< Number of entries in queues[]. */
+ uint16_t (*queues)[]; /**< Queue indexes to use. */
+ union {
+ struct mlx5_flow frxq; /**< Flow with Rx queue. */
+ struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
+ };
};
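+
+/*
+ * Illustrative sketch (not part of the driver): how the union above is
+ * meant to be dereferenced depending on the ->drop bit. The flow pointer
+ * is hypothetical.
+ *
+ *   struct ibv_qp *qp;
+ *
+ *   if (flow->drop)
+ *       qp = flow->drxq.hrxq.qp;
+ *   else
+ *       qp = flow->frxq.hrxq->qp;
+ */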
/** Static initializer for items. */
},
};
-/* Structure to parse actions. */
-struct mlx5_flow_action {
+/** Structure to pass to the conversion function. */
+struct mlx5_flow_parse {
+ struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
+ unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+ uint32_t inner; /**< Set once VXLAN is encountered. */
+ uint32_t create:1; /**< Leave allocated resources on exit. */
uint32_t queue:1; /**< Target is a receive queue. */
uint32_t drop:1; /**< Target is a drop queue. */
uint32_t mark:1; /**< Mark is present in the flow. */
uint32_t mark_id; /**< Mark identifier. */
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
uint16_t queues_n; /**< Number of entries in queue[]. */
};
-/** Structure to pass to the conversion function. */
-struct mlx5_flow_parse {
- struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
- unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
- uint32_t inner; /**< Set once VXLAN is encountered. */
- uint64_t hash_fields; /**< Fields that participate in the hash. */
- struct mlx5_flow_action actions; /**< Parsed action result. */
+static const struct rte_flow_ops mlx5_flow_ops = {
+ .validate = mlx5_flow_validate,
+ .create = mlx5_flow_create,
+ .destroy = mlx5_flow_destroy,
+ .flush = mlx5_flow_flush,
+ .query = NULL,
+ .isolate = mlx5_flow_isolate,
};
-/** Structure for Drop queue. */
-struct rte_flow_drop {
- struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
- struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_wq *wq; /**< Verbs work queue. */
- struct ibv_cq *cq; /**< Verbs completion queue. */
-};
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, a negative errno value on failure.
+ */
+int
+mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = EINVAL;
+
+ if (filter_type == RTE_ETH_FILTER_GENERIC) {
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mlx5_flow_ops;
+ return 0;
+ }
+ ERROR("%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ return -ret;
+}
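+
+/*
+ * Usage sketch (application side, illustrative only): the generic flow
+ * API reaches mlx5_flow_ops through this hook; port_id is hypothetical.
+ *
+ *   const struct rte_flow_ops *ops = NULL;
+ *
+ *   if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
+ *                               RTE_ETH_FILTER_GET, &ops) == 0)
+ *       assert(ops == &mlx5_flow_ops);
+ */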
/**
* Check support for a given item.
}
/**
- * Validate a flow supported by the NIC.
+ * Validate and convert a flow supported by the NIC.
*
* @param priv
* Pointer to private structure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_validate(struct priv *priv,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct mlx5_flow_parse *flow)
+priv_flow_convert(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *flow)
{
const struct mlx5_flow_items *cur_item = mlx5_flow_items;
(void)priv;
+ *flow = (struct mlx5_flow_parse){
+ .ibv_attr = flow->ibv_attr,
+ .create = flow->create,
+ .offset = sizeof(struct ibv_flow_attr),
+ .mark_id = MLX5_FLOW_MARK_DEFAULT,
+ };
if (attr->group) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
NULL,
"groups are not supported");
return -rte_errno;
}
- if (attr->priority) {
+ if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
NULL,
"only ingress is supported");
return -rte_errno;
}
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
- const struct mlx5_flow_items *token = NULL;
- unsigned int i;
- int err;
-
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
- continue;
- for (i = 0;
- cur_item->items &&
- cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
- ++i) {
- if (cur_item->items[i] == items->type) {
- token = &mlx5_flow_items[items->type];
- break;
- }
- }
- if (!token)
- goto exit_item_not_supported;
- cur_item = token;
- err = mlx5_flow_item_validate(items,
- (const uint8_t *)cur_item->mask,
- cur_item->mask_sz);
- if (err)
- goto exit_item_not_supported;
- if (flow->ibv_attr && cur_item->convert) {
- err = cur_item->convert(items,
- (cur_item->default_mask ?
- cur_item->default_mask :
- cur_item->mask),
- flow);
- if (err)
- goto exit_item_not_supported;
- } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
- if (flow->inner) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "cannot recognize multiple"
- " VXLAN encapsulations");
- return -rte_errno;
- }
- flow->inner = 1;
- }
- flow->offset += cur_item->dst_sz;
- }
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
continue;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- flow->actions.drop = 1;
+ flow->drop = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
if (!queue || (queue->index > (priv->rxqs_n - 1)))
goto exit_action_not_supported;
- for (n = 0; n < flow->actions.queues_n; ++n) {
- if (flow->actions.queues[n] == queue->index) {
+ for (n = 0; n < flow->queues_n; ++n) {
+ if (flow->queues[n] == queue->index) {
found = 1;
break;
}
}
- if (flow->actions.queues_n > 1 && !found) {
+ if (flow->queues_n > 1 && !found) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
return -rte_errno;
}
if (!found) {
- flow->actions.queue = 1;
- flow->actions.queues_n = 1;
- flow->actions.queues[0] = queue->index;
+ flow->queue = 1;
+ flow->queues_n = 1;
+ flow->queues[0] = queue->index;
}
} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
const struct rte_flow_action_rss *rss =
"no valid queues");
return -rte_errno;
}
- if (flow->actions.queues_n == 1) {
+ if (flow->queues_n == 1) {
uint16_t found = 0;
- assert(flow->actions.queues_n);
+ assert(flow->queues_n);
for (n = 0; n < rss->num; ++n) {
- if (flow->actions.queues[0] ==
+ if (flow->queues[0] ==
rss->queue[n]) {
found = 1;
break;
return -rte_errno;
}
}
- flow->actions.queue = 1;
+ flow->queue = 1;
for (n = 0; n < rss->num; ++n)
- flow->actions.queues[n] = rss->queue[n];
- flow->actions.queues_n = rss->num;
+ flow->queues[n] = rss->queue[n];
+ flow->queues_n = rss->num;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
" and 16777199");
return -rte_errno;
}
- flow->actions.mark = 1;
- flow->actions.mark_id = mark->id;
+ flow->mark = 1;
+ flow->mark_id = mark->id;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
- flow->actions.mark = 1;
+ flow->mark = 1;
} else {
goto exit_action_not_supported;
}
}
- if (flow->actions.mark && !flow->ibv_attr && !flow->actions.drop)
+ if (flow->mark && !flow->ibv_attr && !flow->drop)
flow->offset += sizeof(struct ibv_flow_spec_action_tag);
- if (!flow->ibv_attr && flow->actions.drop)
+ if (!flow->ibv_attr && flow->drop)
flow->offset += sizeof(struct ibv_flow_spec_action_drop);
- if (!flow->actions.queue && !flow->actions.drop) {
+ if (!flow->queue && !flow->drop) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "no valid action");
return -rte_errno;
}
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct mlx5_flow_items *token = NULL;
+ unsigned int i;
+ int err;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &mlx5_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = mlx5_flow_item_validate(items,
+ (const uint8_t *)cur_item->mask,
+ cur_item->mask_sz);
+ if (err)
+ goto exit_item_not_supported;
+ if (flow->ibv_attr && cur_item->convert) {
+ err = cur_item->convert(items,
+ (cur_item->default_mask ?
+ cur_item->default_mask :
+ cur_item->mask),
+ flow);
+ if (err)
+ goto exit_item_not_supported;
+ } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ if (flow->inner) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "cannot recognize multiple"
+ " VXLAN encapsulations");
+ return -rte_errno;
+ }
+ flow->inner = 1;
+ }
+ flow->offset += cur_item->dst_sz;
+ }
return 0;
exit_item_not_supported:
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
return -rte_errno;
}
-/**
- * Validate a flow supported by the NIC.
- *
- * @see rte_flow_validate()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct priv *priv = dev->data->dev_private;
- int ret;
- struct mlx5_flow_parse flow = {
- .offset = sizeof(struct ibv_flow_attr),
- .actions = {
- .mark_id = MLX5_FLOW_MARK_DEFAULT,
- .queues_n = 0,
- },
- };
-
- priv_lock(priv);
- ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
- priv_unlock(priv);
- return ret;
-}
-
/**
* Convert Ethernet item to Verbs specification.
*
struct ibv_flow_spec_action_tag *tag;
unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ assert(flow->mark);
tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
*tag = (struct ibv_flow_spec_action_tag){
.type = IBV_FLOW_SPEC_ACTION_TAG,
.tag_id = mlx5_flow_mark_set(mark_id),
};
++flow->ibv_attr->num_of_specs;
+ flow->offset += size;
return 0;
}
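+
+/*
+ * Illustrative note: once this tag specification is attached to a flow,
+ * the Rx burst path is expected to surface the mark in the mbuf, along
+ * these lines (hypothetical application code):
+ *
+ *   if (m->ol_flags & PKT_RX_FDIR_ID)
+ *       mark_id = m->hash.fdir.hi;
+ */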
++flow->ibv_attr->num_of_specs;
flow->offset += sizeof(struct ibv_flow_spec_action_drop);
rte_flow->ibv_attr = flow->ibv_attr;
- if (!priv->started)
+ if (!priv->dev->data->dev_started)
return rte_flow;
- rte_flow->qp = priv->flow_drop_queue->qp;
- rte_flow->ibv_flow = ibv_create_flow(rte_flow->qp,
+ rte_flow->drxq.hrxq.qp = priv->flow_drop_queue->qp;
+ rte_flow->ibv_flow = ibv_create_flow(rte_flow->drxq.hrxq.qp,
rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
{
struct rte_flow *rte_flow;
unsigned int i;
- unsigned int j;
- const unsigned int wqs_n = 1 << log2above(flow->actions.queues_n);
- struct ibv_wq *wqs[wqs_n];
assert(priv->pd);
assert(priv->ctx);
- assert(!flow->actions.drop);
- rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow) +
- sizeof(*rte_flow->rxqs) * flow->actions.queues_n,
+ assert(!flow->drop);
+ rte_flow = rte_calloc(__func__, 1,
+ sizeof(*rte_flow) +
+ flow->queues_n * sizeof(uint16_t),
0);
if (!rte_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "cannot allocate flow memory");
return NULL;
}
- for (i = 0; i < flow->actions.queues_n; ++i) {
- struct rxq_ctrl *rxq;
-
- rxq = container_of((*priv->rxqs)[flow->actions.queues[i]],
- struct rxq_ctrl, rxq);
- wqs[i] = rxq->wq;
- rte_flow->rxqs[i] = &rxq->rxq;
- ++rte_flow->rxqs_n;
- rxq->rxq.mark |= flow->actions.mark;
- }
- /* finalise indirection table. */
- for (j = 0; i < wqs_n; ++i, ++j) {
- wqs[i] = wqs[j];
- if (j == flow->actions.queues_n)
- j = 0;
- }
- rte_flow->mark = flow->actions.mark;
+ rte_flow->mark = flow->mark;
rte_flow->ibv_attr = flow->ibv_attr;
- rte_flow->hash_fields = flow->hash_fields;
- rte_flow->ind_table = ibv_create_rwq_ind_table(
- priv->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
- .log_ind_tbl_size = log2above(flow->actions.queues_n),
- .ind_tbl = wqs,
- .comp_mask = 0,
- });
- if (!rte_flow->ind_table) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate indirection table");
- goto error;
+ rte_flow->queues = (uint16_t (*)[])(rte_flow + 1);
+ memcpy(rte_flow->queues, flow->queues,
+ flow->queues_n * sizeof(uint16_t));
+ rte_flow->queues_n = flow->queues_n;
+ rte_flow->frxq.hash_fields = flow->hash_fields;
+ rte_flow->frxq.hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
+ rss_hash_default_key_len,
+ flow->hash_fields,
+ (*rte_flow->queues),
+ rte_flow->queues_n);
+ if (!rte_flow->frxq.hrxq) {
+ rte_flow->frxq.hrxq =
+ mlx5_priv_hrxq_new(priv, rss_hash_default_key,
+ rss_hash_default_key_len,
+ flow->hash_fields,
+ (*rte_flow->queues),
+ rte_flow->queues_n);
+ if (!rte_flow->frxq.hrxq) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot create hash rxq");
+ goto error;
+ }
}
- rte_flow->qp = ibv_create_qp_ex(
- priv->ctx,
- &(struct ibv_qp_init_attr_ex){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_QP_INIT_ATTR_PD |
- IBV_QP_INIT_ATTR_IND_TABLE |
- IBV_QP_INIT_ATTR_RX_HASH,
- .rx_hash_conf = (struct ibv_rx_hash_conf){
- .rx_hash_function =
- IBV_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_hash_default_key_len,
- .rx_hash_key = rss_hash_default_key,
- .rx_hash_fields_mask = rte_flow->hash_fields,
- },
- .rwq_ind_tbl = rte_flow->ind_table,
- .pd = priv->pd
- });
- if (!rte_flow->qp) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate QP");
- goto error;
+ for (i = 0; i != flow->queues_n; ++i) {
+ struct mlx5_rxq_data *q =
+ (*priv->rxqs)[flow->queues[i]];
+
+ q->mark |= flow->mark;
}
- if (!priv->started)
+ if (!priv->dev->data->dev_started)
return rte_flow;
- rte_flow->ibv_flow = ibv_create_flow(rte_flow->qp,
+ rte_flow->ibv_flow = ibv_create_flow(rte_flow->frxq.hrxq->qp,
rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
return rte_flow;
error:
assert(rte_flow);
- if (rte_flow->qp)
- ibv_destroy_qp(rte_flow->qp);
- if (rte_flow->ind_table)
- ibv_destroy_rwq_ind_table(rte_flow->ind_table);
+ if (rte_flow->frxq.hrxq)
+ mlx5_priv_hrxq_release(priv, rte_flow->frxq.hrxq);
rte_free(rte_flow);
return NULL;
}
/**
- * Convert a flow.
+ * Validate a flow.
*
* @param priv
* Pointer to private structure.
* Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL.
+ * @param[in,out] parser
+ * MLX5 parser structure.
*
* @return
- * A flow on success, NULL otherwise.
+ * 0 on success, a negative errno value on failure.
*/
-static struct rte_flow *
-priv_flow_create(struct priv *priv,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+static int
+priv_flow_validate(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *parser)
{
- struct rte_flow *rte_flow;
- struct mlx5_flow_parse flow = {
- .offset = sizeof(struct ibv_flow_attr),
- .actions = {
- .mark_id = MLX5_FLOW_MARK_DEFAULT,
- .queues = { 0 },
- .queues_n = 0,
- },
- };
int err;
- err = priv_flow_validate(priv, attr, items, actions, error, &flow);
+ err = priv_flow_convert(priv, attr, items, actions, error, parser);
if (err)
goto exit;
- flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
- flow.offset = sizeof(struct ibv_flow_attr);
- if (!flow.ibv_attr) {
+ if (parser->mark)
+ parser->offset += sizeof(struct ibv_flow_spec_action_tag);
+ parser->ibv_attr = rte_malloc(__func__, parser->offset, 0);
+ if (!parser->ibv_attr) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "cannot allocate ibv_attr memory");
+ err = rte_errno;
goto exit;
}
- *flow.ibv_attr = (struct ibv_flow_attr){
+ *parser->ibv_attr = (struct ibv_flow_attr){
.type = IBV_FLOW_ATTR_NORMAL,
.size = sizeof(struct ibv_flow_attr),
.priority = attr->priority,
.port = 0,
.flags = 0,
};
- flow.inner = 0;
- flow.hash_fields = 0;
- claim_zero(priv_flow_validate(priv, attr, items, actions,
- error, &flow));
- if (flow.actions.mark && !flow.actions.drop) {
- mlx5_flow_create_flag_mark(&flow, flow.actions.mark_id);
- flow.offset += sizeof(struct ibv_flow_spec_action_tag);
- }
- if (flow.actions.drop)
- rte_flow =
- priv_flow_create_action_queue_drop(priv, &flow, error);
+ err = priv_flow_convert(priv, attr, items, actions, error, parser);
+ if (err)
+ goto exit;
+ if (parser->mark)
+ mlx5_flow_create_flag_mark(parser, parser->mark_id);
+ /* Leave the allocated resources to the creation path. */
+ if (parser->create)
+ return 0;
+exit:
+ if (parser->ibv_attr) {
+ rte_free(parser->ibv_attr);
+ parser->ibv_attr = NULL;
+ }
+ return err;
+}
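+
+/*
+ * Sketch of the two-pass scheme above (illustrative): the first
+ * priv_flow_convert() call runs with ibv_attr == NULL and only grows
+ * ->offset, rte_malloc() then sizes the buffer, and the second call
+ * fills the Verbs specifications in place, e.g.:
+ *
+ *   struct mlx5_flow_parse parser = { .create = 1, };
+ *
+ *   if (!priv_flow_validate(priv, attr, items, actions, error, &parser))
+ *       ... parser.ibv_attr now holds the converted flow ...
+ */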
+
+/**
+ * Convert a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow on success, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create(struct priv *priv,
+ struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_parse parser = { .create = 1, };
+ struct rte_flow *flow;
+ int err;
+
+ err = priv_flow_validate(priv, attr, items, actions, error, &parser);
+ if (err)
+ goto exit;
+ if (parser.drop)
+ flow = priv_flow_create_action_queue_drop(priv, &parser, error);
else
- rte_flow = priv_flow_create_action_queue(priv, &flow, error);
- if (!rte_flow)
+ flow = priv_flow_create_action_queue(priv, &parser, error);
+ if (!flow)
goto exit;
- return rte_flow;
+ TAILQ_INSERT_TAIL(list, flow, next);
+ DEBUG("Flow created %p", (void *)flow);
+ return flow;
exit:
- rte_free(flow.ibv_attr);
+ if (parser.ibv_attr)
+ rte_free(parser.ibv_attr);
return NULL;
}
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ struct mlx5_flow_parse parser = { .create = 0, };
+
+ priv_lock(priv);
+ ret = priv_flow_validate(priv, attr, items, actions, error, &parser);
+ priv_unlock(priv);
+ return ret;
+}
+
/**
* Create a flow.
*
struct rte_flow *flow;
priv_lock(priv);
- flow = priv_flow_create(priv, attr, items, actions, error);
- if (flow) {
- TAILQ_INSERT_TAIL(&priv->flows, flow, next);
- DEBUG("Flow created %p", (void *)flow);
- }
+ flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
+ error);
priv_unlock(priv);
return flow;
}
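+
+/*
+ * Application-side sketch (illustrative, port_id and the rule arrays
+ * are hypothetical): creation through the generic API lands in
+ * mlx5_flow_create() above.
+ *
+ *   struct rte_flow_error error;
+ *   struct rte_flow *flow;
+ *
+ *   flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
+ *   if (!flow)
+ *       printf("flow creation failed: %s\n", error.message);
+ */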
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
* @param[in] flow
* Flow to destroy.
*/
static void
priv_flow_destroy(struct priv *priv,
+ struct mlx5_flows *list,
struct rte_flow *flow)
{
- TAILQ_REMOVE(&priv->flows, flow, next);
- if (flow->ibv_flow)
- claim_zero(ibv_destroy_flow(flow->ibv_flow));
- if (flow->drop)
+ unsigned int i;
+ uint16_t *queues;
+ uint16_t queues_n;
+
+ if (flow->drop || !flow->mark)
goto free;
- if (flow->qp)
- claim_zero(ibv_destroy_qp(flow->qp));
- if (flow->ind_table)
- claim_zero(ibv_destroy_rwq_ind_table(flow->ind_table));
- if (flow->mark) {
+ queues = flow->frxq.hrxq->ind_table->queues;
+ queues_n = flow->frxq.hrxq->ind_table->queues_n;
+ for (i = 0; i != queues_n; ++i) {
struct rte_flow *tmp;
- struct rxq *rxq;
- uint32_t mark_n = 0;
- uint32_t queue_n;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[i]];
+ int mark = 0;
/*
* To remove the mark from the queue, the queue must not be
* present in any other marked flow (RSS or not).
*/
- for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) {
- rxq = flow->rxqs[queue_n];
- for (tmp = TAILQ_FIRST(&priv->flows);
- tmp;
- tmp = TAILQ_NEXT(tmp, next)) {
- uint32_t tqueue_n;
-
- if (tmp->drop)
- continue;
- for (tqueue_n = 0;
- tqueue_n < tmp->rxqs_n;
- ++tqueue_n) {
- struct rxq *trxq;
-
- trxq = tmp->rxqs[tqueue_n];
- if (rxq == trxq)
- ++mark_n;
- }
- }
- rxq->mark = !!mark_n;
+ TAILQ_FOREACH(tmp, list, next) {
+ unsigned int j;
+
+ if (!tmp->mark)
+ continue;
+ for (j = 0;
+ (j != tmp->frxq.hrxq->ind_table->queues_n) &&
+ !mark;
+ j++)
+ if (tmp->frxq.hrxq->ind_table->queues[j] ==
+ queues[i])
+ mark = 1;
}
+ rxq_data->mark = mark;
}
free:
+ if (flow->ibv_flow)
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ if (!flow->drop)
+ mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+ TAILQ_REMOVE(list, flow, next);
rte_free(flow->ibv_attr);
DEBUG("Flow destroyed %p", (void *)flow);
rte_free(flow);
(void)error;
priv_lock(priv);
- priv_flow_destroy(priv, flow);
+ priv_flow_destroy(priv, &priv->flows, flow);
priv_unlock(priv);
return 0;
}
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
-static void
-priv_flow_flush(struct priv *priv)
+void
+priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
{
- while (!TAILQ_EMPTY(&priv->flows)) {
+ while (!TAILQ_EMPTY(list)) {
struct rte_flow *flow;
- flow = TAILQ_FIRST(&priv->flows);
- priv_flow_destroy(priv, flow);
+ flow = TAILQ_FIRST(list);
+ priv_flow_destroy(priv, list, flow);
}
}
(void)error;
priv_lock(priv);
- priv_flow_flush(priv);
+ priv_flow_flush(priv, &priv->flows);
priv_unlock(priv);
return 0;
}
* @return
* 0 on success.
*/
-static int
+int
priv_flow_create_drop_queue(struct priv *priv)
{
- struct rte_flow_drop *fdq = NULL;
+ struct mlx5_hrxq_drop *fdq = NULL;
assert(priv->pd);
assert(priv->ctx);
* @param priv
* Pointer to private structure.
*/
-static void
+void
priv_flow_delete_drop_queue(struct priv *priv)
{
- struct rte_flow_drop *fdq = priv->flow_drop_queue;
+ struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
if (!fdq)
return;
/**
* Remove all flows.
*
- * Called by dev_stop() to remove all flows.
- *
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
void
-priv_flow_stop(struct priv *priv)
+priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
{
struct rte_flow *flow;
- TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
claim_zero(ibv_destroy_flow(flow->ibv_flow));
flow->ibv_flow = NULL;
if (flow->mark) {
unsigned int n;
+ struct mlx5_ind_table_ibv *ind_tbl =
+ flow->frxq.hrxq->ind_table;
- for (n = 0; n < flow->rxqs_n; ++n)
- flow->rxqs[n]->mark = 0;
+ for (n = 0; n < ind_tbl->queues_n; ++n)
+ (*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
}
+ /* Release the hash Rx queue only once its indirection table has
+ * been used above to clear the mark flags. */
+ mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+ flow->frxq.hrxq = NULL;
DEBUG("Flow %p removed", (void *)flow);
}
- priv_flow_delete_drop_queue(priv);
}
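+
+/*
+ * Pairing sketch (illustrative): with the drop queue handling moved out
+ * of this function, a port restart is expected to look like:
+ *
+ *   priv_flow_stop(priv, &priv->flows);
+ *   priv_flow_delete_drop_queue(priv);
+ *   ...
+ *   priv_flow_create_drop_queue(priv);
+ *   priv_flow_start(priv, &priv->flows);
+ */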
/**
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*
* @return
* 0 on success, an errno value otherwise and rte_errno is set.
*/
int
-priv_flow_start(struct priv *priv)
+priv_flow_start(struct priv *priv, struct mlx5_flows *list)
{
- int ret;
struct rte_flow *flow;
- ret = priv_flow_create_drop_queue(priv);
- if (ret)
- return -1;
- TAILQ_FOREACH(flow, &priv->flows, next) {
- struct ibv_qp *qp;
-
- if (flow->drop)
- qp = priv->flow_drop_queue->qp;
- else
- qp = flow->qp;
- flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
+ TAILQ_FOREACH(flow, list, next) {
+ if (flow->frxq.hrxq)
+ goto flow_create;
+ flow->frxq.hrxq =
+ mlx5_priv_hrxq_get(priv, rss_hash_default_key,
+ rss_hash_default_key_len,
+ flow->frxq.hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (flow->frxq.hrxq)
+ goto flow_create;
+ flow->frxq.hrxq =
+ mlx5_priv_hrxq_new(priv, rss_hash_default_key,
+ rss_hash_default_key_len,
+ flow->frxq.hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (!flow->frxq.hrxq) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+flow_create:
+ flow->ibv_flow = ibv_create_flow(flow->frxq.hrxq->qp,
+ flow->ibv_attr);
if (!flow->ibv_flow) {
DEBUG("Flow %p cannot be applied", (void *)flow);
rte_errno = EINVAL;
if (flow->mark) {
unsigned int n;
- for (n = 0; n < flow->rxqs_n; ++n)
- flow->rxqs[n]->mark = 1;
- }
- }
- return 0;
-}
-
-/**
- * Verify if the Rx queue is used in a flow.
- *
- * @param priv
- * Pointer to private structure.
- * @param rxq
- * Pointer to the queue to search.
- *
- * @return
- * Nonzero if the queue is used by a flow.
- */
-int
-priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
-{
- struct rte_flow *flow;
-
- for (flow = TAILQ_FIRST(&priv->flows);
- flow;
- flow = TAILQ_NEXT(flow, next)) {
- unsigned int n;
-
- if (flow->drop)
- continue;
- for (n = 0; n < flow->rxqs_n; ++n) {
- if (flow->rxqs[n] == rxq)
- return 1;
+ for (n = 0;
+ n < flow->frxq.hrxq->ind_table->queues_n;
+ ++n) {
+ uint16_t idx =
+ flow->frxq.hrxq->ind_table->queues[n];
+ (*priv->rxqs)[idx]->mark = 1;
+ }
}
}
return 0;
struct priv *priv = dev->data->dev_private;
priv_lock(priv);
- if (priv->started) {
+ if (dev->data->dev_started) {
rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
priv_unlock(priv);
return 0;
}
+
+/**
+ * Verify the flow list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ *   The number of flows not released.
+ */
+int
+priv_flow_verify(struct priv *priv)
+{
+ struct rte_flow *flow;
+ int ret = 0;
+
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ DEBUG("%p: flow %p still referenced", (void *)priv,
+ (void *)flow);
+ ++ret;
+ }
+ return ret;
+}
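+
+/*
+ * Illustrative call site (hypothetical): a device close path can rely
+ * on the helper to report leaked flows.
+ *
+ *   if (priv_flow_verify(priv))
+ *       WARN("%p: some flows are still referenced", (void *)priv);
+ */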
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ * @param vlan_spec
+ * A VLAN flow spec to apply.
+ * @param vlan_mask
+ * A VLAN flow mask to apply.
+ *
+ * @return
+ * 0 on success, an errno value otherwise.
+ */
+int
+mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ .priority = MLX5_CTRL_FLOW_PRIORITY,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = eth_spec,
+ .last = NULL,
+ .mask = eth_mask,
+ },
+ {
+ .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
+ RTE_FLOW_ITEM_TYPE_END,
+ .spec = vlan_spec,
+ .last = NULL,
+ .mask = vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &(struct rte_flow_action_queue){
+ .index = 0,
+ },
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+
+ flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
+ &error);
+ if (!flow)
+ return rte_errno;
+ return 0;
+}
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ *
+ * @return
+ * 0 on success, an errno value otherwise.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask)
+{
+ return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
+}
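+
+/*
+ * Usage sketch (illustrative): enabling a broadcast control flow, e.g.
+ * from the traffic start path.
+ *
+ *   struct rte_flow_item_eth bcast = {
+ *       .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ *   };
+ *
+ *   claim_zero(mlx5_ctrl_flow(dev, &bcast, &bcast));
+ */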