* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/**
+ * @file
+ * Flow API operations for mlx4 driver.
+ */
+
+#include <arpa/inet.h>
#include <assert.h>
+#include <errno.h>
+#include <stdalign.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
/* PMD headers. */
#include "mlx4.h"
#include "mlx4_flow.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
-/** Static initializer for items. */
-#define ITEMS(...) \
+/** Static initializer for a list of subsequent item types. */
+#define NEXT_ITEM(...) \
(const enum rte_flow_item_type []){ \
__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
}
-/** Structure to generate a simple graph of layers supported by the NIC. */
-struct mlx4_flow_items {
- /** List of possible actions for these items. */
- const enum rte_flow_action_type *const actions;
+/** Processor structure associated with a flow item. */
+struct mlx4_flow_proc_item {
/** Bit-masks corresponding to the possibilities for the item. */
const void *mask;
/**
* rte_flow item to convert.
* @param default_mask
* Default bit-masks to use when item->mask is not provided.
- * @param data
- * Internal structure to store the conversion.
+ * @param flow
+ * Flow rule handle to update.
*
* @return
* 0 on success, negative value otherwise.
*/
int (*convert)(const struct rte_flow_item *item,
const void *default_mask,
- void *data);
+ struct rte_flow *flow);
/** Size in bytes of the destination structure. */
const unsigned int dst_sz;
- /** List of possible following items. */
- const enum rte_flow_item_type *const items;
+ /** List of possible subsequent items. */
+ const enum rte_flow_item_type *const next_item;
};
struct rte_flow_drop {
struct ibv_cq *cq; /**< Verbs completion queue. */
};
-/** Valid action for this PMD. */
-static const enum rte_flow_action_type valid_actions[] = {
- RTE_FLOW_ACTION_TYPE_DROP,
- RTE_FLOW_ACTION_TYPE_QUEUE,
- RTE_FLOW_ACTION_TYPE_END,
-};
-
/**
* Convert Ethernet item to Verbs specification.
*
* Item specification.
* @param default_mask[in]
* Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param flow[in, out]
+ * Flow rule handle to update.
*/
static int
mlx4_flow_create_eth(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct rte_flow *flow)
{
const struct rte_flow_item_eth *spec = item->spec;
const struct rte_flow_item_eth *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
struct ibv_flow_spec_eth *eth;
const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
unsigned int i;
++flow->ibv_attr->num_of_specs;
flow->ibv_attr->priority = 2;
- eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
*eth = (struct ibv_flow_spec_eth) {
.type = IBV_FLOW_SPEC_ETH,
.size = eth_size,
* Item specification.
* @param default_mask[in]
* Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param flow[in, out]
+ * Flow rule handle to update.
*/
static int
mlx4_flow_create_vlan(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct rte_flow *flow)
{
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
struct ibv_flow_spec_eth *eth;
const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
- eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
+ eth_size);
if (!spec)
return 0;
if (!mask)
* Item specification.
* @param default_mask[in]
* Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param flow[in, out]
+ * Flow rule handle to update.
*/
static int
mlx4_flow_create_ipv4(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct rte_flow *flow)
{
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
struct ibv_flow_spec_ipv4 *ipv4;
unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);
++flow->ibv_attr->num_of_specs;
flow->ibv_attr->priority = 1;
- ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
*ipv4 = (struct ibv_flow_spec_ipv4) {
.type = IBV_FLOW_SPEC_IPV4,
.size = ipv4_size,
* Item specification.
* @param default_mask[in]
* Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param flow[in, out]
+ * Flow rule handle to update.
*/
static int
mlx4_flow_create_udp(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct rte_flow *flow)
{
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
struct ibv_flow_spec_tcp_udp *udp;
unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
++flow->ibv_attr->num_of_specs;
flow->ibv_attr->priority = 0;
- udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
*udp = (struct ibv_flow_spec_tcp_udp) {
.type = IBV_FLOW_SPEC_UDP,
.size = udp_size,
* Item specification.
* @param default_mask[in]
* Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param flow[in, out]
+ * Flow rule handle to update.
*/
static int
mlx4_flow_create_tcp(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct rte_flow *flow)
{
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
struct ibv_flow_spec_tcp_udp *tcp;
unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
++flow->ibv_attr->num_of_specs;
flow->ibv_attr->priority = 0;
- tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
*tcp = (struct ibv_flow_spec_tcp_udp) {
.type = IBV_FLOW_SPEC_TCP,
.size = tcp_size,
}
-/** Graph of supported items and associated actions. */
+/** Graph of pattern items supported by the NIC. */
-static const struct mlx4_flow_items mlx4_flow_items[] = {
+static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
[RTE_FLOW_ITEM_TYPE_END] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
},
[RTE_FLOW_ITEM_TYPE_ETH] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
- RTE_FLOW_ITEM_TYPE_IPV4),
- .actions = valid_actions,
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4),
.mask = &(const struct rte_flow_item_eth){
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.dst_sz = sizeof(struct ibv_flow_spec_eth),
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
- .actions = valid_actions,
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
.mask = &(const struct rte_flow_item_vlan){
- /* rte_flow_item_vlan_mask is invalid for mlx4. */
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- .tci = 0x0fff,
-#else
- .tci = 0xff0f,
-#endif
+ /* Only TCI VID matching is supported. */
+ .tci = RTE_BE16(0x0fff),
},
.mask_sz = sizeof(struct rte_flow_item_vlan),
.validate = mlx4_flow_validate_vlan,
.dst_sz = 0,
},
[RTE_FLOW_ITEM_TYPE_IPV4] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_TCP),
- .actions = valid_actions,
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
.mask = &(const struct rte_flow_item_ipv4){
.hdr = {
- .src_addr = -1,
- .dst_addr = -1,
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
},
},
.default_mask = &rte_flow_item_ipv4_mask,
.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
- .actions = valid_actions,
.mask = &(const struct rte_flow_item_udp){
.hdr = {
- .src_port = -1,
- .dst_port = -1,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
},
.default_mask = &rte_flow_item_udp_mask,
.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
- .actions = valid_actions,
.mask = &(const struct rte_flow_item_tcp){
.hdr = {
- .src_port = -1,
- .dst_port = -1,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
},
.default_mask = &rte_flow_item_tcp_mask,
};
/**
- * Validate a flow supported by the NIC.
+ * Make sure a flow rule is supported and initialize associated structure.
*
* @param priv
* Pointer to private structure.
* @param[in] attr
* Flow rule attributes.
- * @param[in] items
+ * @param[in] pattern
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL.
- * @param[in, out] flow
- * Flow structure to update.
+ * @param[in, out] addr
+ * Buffer where the resulting flow rule handle pointer must be stored.
+ *   If NULL, stop processing after the validation stage.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_validate(struct priv *priv,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct mlx4_flow *flow)
+mlx4_flow_prepare(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow **addr)
{
- const struct mlx4_flow_items *cur_item = mlx4_flow_items;
- struct mlx4_flow_action action = {
- .queue = 0,
- .drop = 0,
- };
-
- (void)priv;
- if (attr->group) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "groups are not supported");
- return -rte_errno;
- }
- if (attr->priority) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- NULL,
- "priorities are not supported");
- return -rte_errno;
- }
- if (attr->egress) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL,
- "egress is not supported");
- return -rte_errno;
- }
- if (!attr->ingress) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "only ingress is supported");
- return -rte_errno;
- }
- /* Go over items list. */
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
- const struct mlx4_flow_items *token = NULL;
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *action;
+ const struct mlx4_flow_proc_item *proc;
+ struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
+ struct rte_flow *flow = &temp;
+ uint32_t priority_override = 0;
+
+ if (attr->group)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "groups are not supported");
+ if (priv->isolated)
+ priority_override = attr->priority;
+ else if (attr->priority)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priorities are not supported outside isolated mode");
+ if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "maximum priority level is "
+ MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
+ if (attr->egress)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL, "egress is not supported");
+ if (!attr->ingress)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL, "only ingress is supported");
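+	/*
+	 * Attribute checks above run only once; the pattern and action
+	 * lists below are parsed twice: first against the temporary handle
+	 * on the stack (validation pass), then again once a handle of the
+	 * proper size has been allocated (fill pass).
+	 */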
+fill:
+ proc = mlx4_flow_proc_item_list;
+ /* Go over pattern. */
+ for (item = pattern; item->type; ++item) {
+ const struct mlx4_flow_proc_item *next = NULL;
unsigned int i;
int err;
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
continue;
/*
* The nic can support patterns with NULL eth spec only
* if eth is a single item in a rule.
*/
- if (!items->spec &&
- items->type == RTE_FLOW_ITEM_TYPE_ETH) {
- const struct rte_flow_item *next = items + 1;
-
- if (next->type != RTE_FLOW_ITEM_TYPE_END) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "the rule requires"
- " an Ethernet spec");
- return -rte_errno;
- }
+ if (!item->spec && item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ const struct rte_flow_item *next = item + 1;
+
+ if (next->type)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "the rule requires an Ethernet spec");
}
- for (i = 0;
- cur_item->items &&
- cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
- ++i) {
- if (cur_item->items[i] == items->type) {
- token = &mlx4_flow_items[items->type];
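+		/*
+		 * Look up the current item among the allowed successors of
+		 * the previous one; anything else is rejected.
+		 */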
+ for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
+ if (proc->next_item[i] == item->type) {
+ next = &mlx4_flow_proc_item_list[item->type];
break;
}
}
- if (!token)
+ if (!next)
goto exit_item_not_supported;
- cur_item = token;
- err = cur_item->validate(items,
- (const uint8_t *)cur_item->mask,
- cur_item->mask_sz);
- if (err)
- goto exit_item_not_supported;
- if (flow->ibv_attr && cur_item->convert) {
- err = cur_item->convert(items,
- (cur_item->default_mask ?
- cur_item->default_mask :
- cur_item->mask),
- flow);
+ proc = next;
+ /* Perform validation once, while handle is not allocated. */
+ if (flow == &temp) {
+ err = proc->validate(item, proc->mask, proc->mask_sz);
+ if (err)
+ goto exit_item_not_supported;
+ } else if (proc->convert) {
+ err = proc->convert(item,
+ (proc->default_mask ?
+ proc->default_mask :
+ proc->mask),
+ flow);
if (err)
goto exit_item_not_supported;
}
- flow->offset += cur_item->dst_sz;
+ flow->ibv_attr_size += proc->dst_sz;
}
- /* Go over actions list */
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ /* Use specified priority level when in isolated mode. */
+ if (priv->isolated && flow != &temp)
+ flow->ibv_attr->priority = priority_override;
+ /* Go over actions list. */
+ for (action = actions; action->type; ++action) {
+ switch (action->type) {
+ const struct rte_flow_action_queue *queue;
+
+ case RTE_FLOW_ACTION_TYPE_VOID:
continue;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- action.drop = 1;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- const struct rte_flow_action_queue *queue =
- (const struct rte_flow_action_queue *)
- actions->conf;
-
- if (!queue || (queue->index > (priv->rxqs_n - 1)))
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ flow->drop = 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = action->conf;
+ if (queue->index >= priv->dev->data->nb_rx_queues)
goto exit_action_not_supported;
- action.queue = 1;
- } else {
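+			/*
+			 * Only the queue index is checked here; the target
+			 * Rx queue itself is resolved when the rule is
+			 * applied (see mlx4_flow_toggle()).
+			 */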
+ flow->queue = 1;
+ flow->queue_id = queue->index;
+ break;
+ default:
goto exit_action_not_supported;
}
}
- if (!action.queue && !action.drop) {
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "no valid action");
- return -rte_errno;
+ if (!flow->queue && !flow->drop)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "no valid action");
+ /* Validation ends here. */
+ if (!addr)
+ return 0;
+ if (flow == &temp) {
+ /* Allocate proper handle based on collected data. */
+ const struct mlx4_malloc_vec vec[] = {
+ {
+ .align = alignof(struct rte_flow),
+ .size = sizeof(*flow),
+ .addr = (void **)&flow,
+ },
+ {
+ .align = alignof(struct ibv_flow_attr),
+ .size = temp.ibv_attr_size,
+ .addr = (void **)&temp.ibv_attr,
+ },
+ };
+
+ if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec)))
+ return rte_flow_error_set
+ (error, -rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "flow rule handle allocation failure");
+ /* Most fields will be updated by second pass. */
+ *flow = (struct rte_flow){
+ .ibv_attr = temp.ibv_attr,
+ .ibv_attr_size = sizeof(*flow->ibv_attr),
+ };
+ *flow->ibv_attr = (struct ibv_flow_attr){
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .size = sizeof(*flow->ibv_attr),
+ .port = priv->port,
+ };
+ goto fill;
}
+ *addr = flow;
return 0;
exit_item_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
- return -rte_errno;
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "item not supported");
exit_action_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "action not supported");
- return -rte_errno;
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "action not supported");
}
/**
* @see rte_flow_validate()
* @see rte_flow_ops
*/
-int
+static int
mlx4_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
+ const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
- int ret;
- struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
- priv_lock(priv);
- ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
- priv_unlock(priv);
- return ret;
+ return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}
/**
ERROR("Cannot allocate memory for drop struct");
goto err;
}
- cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
- &(struct ibv_exp_cq_init_attr){
- .comp_mask = 0,
- });
+ cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!cq) {
ERROR("Cannot create drop CQ");
goto err_create_cq;
}
- qp = ibv_exp_create_qp(priv->ctx,
- &(struct ibv_exp_qp_init_attr){
- .send_cq = cq,
- .recv_cq = cq,
- .cap = {
- .max_recv_wr = 1,
- .max_recv_sge = 1,
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_PORT,
- .pd = priv->pd,
- .port_num = priv->port,
- });
+ qp = ibv_create_qp(priv->pd,
+ &(struct ibv_qp_init_attr){
+ .send_cq = cq,
+ .recv_cq = cq,
+ .cap = {
+ .max_recv_wr = 1,
+ .max_recv_sge = 1,
+ },
+ .qp_type = IBV_QPT_RAW_PACKET,
+ });
if (!qp) {
ERROR("Cannot create drop QP");
goto err_create_qp;
}
/**
- * Complete flow rule creation.
- *
- * @param priv
- * Pointer to private structure.
- * @param ibv_attr
- * Verbs flow attributes.
- * @param action
- * Target action structure.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * A flow if the rule could be created.
- */
-static struct rte_flow *
-priv_flow_create_action_queue(struct priv *priv,
- struct ibv_flow_attr *ibv_attr,
- struct mlx4_flow_action *action,
- struct rte_flow_error *error)
-{
- struct ibv_qp *qp;
- struct rte_flow *rte_flow;
-
- assert(priv->pd);
- assert(priv->ctx);
- rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
- if (!rte_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate flow memory");
- return NULL;
- }
- if (action->drop) {
- qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
- } else {
- struct rxq *rxq = (*priv->rxqs)[action->queue_id];
-
- qp = rxq->qp;
- rte_flow->qp = qp;
- }
- rte_flow->ibv_attr = ibv_attr;
- if (!priv->started)
- return rte_flow;
- rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
- if (!rte_flow->ibv_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "flow rule creation failure");
- goto error;
- }
- return rte_flow;
-
-error:
- rte_free(rte_flow);
- return NULL;
-}
-
-/**
- * Convert a flow.
+ * Toggle a configured flow rule.
*
* @param priv
* Pointer to private structure.
- * @param[in] attr
- * Flow rule attributes.
- * @param[in] items
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END action).
+ * @param flow
+ * Flow rule handle to toggle.
+ * @param enable
+ * Whether associated Verbs flow must be created or removed.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow on success, NULL otherwise.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct rte_flow *
-priv_flow_create(struct priv *priv,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
+static int
+mlx4_flow_toggle(struct priv *priv,
+ struct rte_flow *flow,
+ int enable,
struct rte_flow_error *error)
{
- struct rte_flow *rte_flow;
- struct mlx4_flow_action action;
- struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
+ struct ibv_qp *qp = NULL;
+ const char *msg;
int err;
- err = priv_flow_validate(priv, attr, items, actions, error, &flow);
- if (err)
- return NULL;
- flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
- if (!flow.ibv_attr) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate ibv_attr memory");
- return NULL;
+ if (!enable) {
+ if (!flow->ibv_flow)
+ return 0;
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ return 0;
}
- flow.offset = sizeof(struct ibv_flow_attr);
- *flow.ibv_attr = (struct ibv_flow_attr){
- .comp_mask = 0,
- .type = IBV_FLOW_ATTR_NORMAL,
- .size = sizeof(struct ibv_flow_attr),
- .priority = attr->priority,
- .num_of_specs = 0,
- .port = priv->port,
- .flags = 0,
- };
- claim_zero(priv_flow_validate(priv, attr, items, actions,
- error, &flow));
- action = (struct mlx4_flow_action){
- .queue = 0,
- .drop = 0,
- };
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
- continue;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- action.queue = 1;
- action.queue_id =
- ((const struct rte_flow_action_queue *)
- actions->conf)->index;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- action.drop = 1;
- } else {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "unsupported action");
- goto exit;
+ if (flow->ibv_flow)
+ return 0;
+ assert(flow->queue ^ flow->drop);
+ if (flow->queue) {
+ struct rxq *rxq;
+
+ assert(flow->queue_id < priv->dev->data->nb_rx_queues);
+ rxq = priv->dev->data->rx_queues[flow->queue_id];
+ if (!rxq) {
+ err = EINVAL;
+ msg = "target queue must be configured first";
+ goto error;
}
+ qp = rxq->qp;
}
- rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
- &action, error);
- if (rte_flow)
- return rte_flow;
-exit:
- rte_free(flow.ibv_attr);
- return NULL;
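+	/*
+	 * Drop rules target a dedicated QP to which no receive buffers are
+	 * ever posted, so matched packets are simply discarded.
+	 */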
+ if (flow->drop) {
+ assert(priv->flow_drop_queue);
+ qp = priv->flow_drop_queue->qp;
+ }
+ assert(qp);
+ assert(flow->ibv_attr);
+ flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
+ if (flow->ibv_flow)
+ return 0;
+ err = errno;
+ msg = "flow rule rejected by device";
+error:
+ return rte_flow_error_set
+ (error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}
/**
* @see rte_flow_create()
* @see rte_flow_ops
*/
-struct rte_flow *
+static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
+ const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
+ int err;
- priv_lock(priv);
- flow = priv_flow_create(priv, attr, items, actions, error);
- if (flow) {
+ err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
+ if (err)
+ return NULL;
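+	/*
+	 * The Verbs flow is created now only if the port is already
+	 * started; otherwise mlx4_flow_start() applies it later.
+	 */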
+ err = mlx4_flow_toggle(priv, flow, priv->started, error);
+ if (!err) {
LIST_INSERT_HEAD(&priv->flows, flow, next);
- DEBUG("Flow created %p", (void *)flow);
+ return flow;
}
- priv_unlock(priv);
- return flow;
+ rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ error->message);
+ rte_free(flow);
+ return NULL;
}
/**
- * @see rte_flow_isolate()
- *
- * Must be done before calling dev_configure().
- *
- * @param dev
- * Pointer to the ethernet device structure.
- * @param enable
- * Nonzero to enter isolated mode, attempt to leave it otherwise.
- * @param[out] error
- * Perform verbose error reporting if not NULL. PMDs initialize this
- * structure in case of error only.
+ * Configure isolated mode.
*
- * @return
- * 0 on success, a negative value on error.
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
*/
-int
+static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
int enable,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
- if (priv->rxqs) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "isolated mode must be set"
- " before configuring the device");
- priv_unlock(priv);
- return -rte_errno;
- }
+ if (!!enable == !!priv->isolated)
+ return 0;
priv->isolated = !!enable;
- priv_unlock(priv);
+ if (enable) {
+ mlx4_mac_addr_del(priv);
+ } else if (mlx4_mac_addr_add(priv) < 0) {
+ priv->isolated = 1;
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot leave isolated mode");
+ }
return 0;
}
/**
- * Destroy a flow.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] flow
- * Flow to destroy.
- */
-static void
-priv_flow_destroy(struct priv *priv, struct rte_flow *flow)
-{
- (void)priv;
- LIST_REMOVE(flow, next);
- if (flow->ibv_flow)
- claim_zero(ibv_destroy_flow(flow->ibv_flow));
- rte_free(flow->ibv_attr);
- DEBUG("Flow destroyed %p", (void *)flow);
- rte_free(flow);
-}
-
-/**
- * Destroy a flow.
+ * Destroy a flow rule.
*
* @see rte_flow_destroy()
* @see rte_flow_ops
*/
-int
+static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
+ int err = mlx4_flow_toggle(priv, flow, 0, error);
- (void)error;
- priv_lock(priv);
- priv_flow_destroy(priv, flow);
- priv_unlock(priv);
+ if (err)
+ return err;
+ LIST_REMOVE(flow, next);
+ rte_free(flow);
return 0;
}
/**
- * Destroy all flows.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_flow_flush(struct priv *priv)
-{
- while (!LIST_EMPTY(&priv->flows)) {
- struct rte_flow *flow;
-
- flow = LIST_FIRST(&priv->flows);
- priv_flow_destroy(priv, flow);
- }
-}
-
-/**
- * Destroy all flows.
+ * Destroy all flow rules.
*
* @see rte_flow_flush()
* @see rte_flow_ops
*/
-int
+static int
mlx4_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
- (void)error;
- priv_lock(priv);
- priv_flow_flush(priv);
- priv_unlock(priv);
+ while (!LIST_EMPTY(&priv->flows)) {
+ struct rte_flow *flow;
+
+ flow = LIST_FIRST(&priv->flows);
+ mlx4_flow_destroy(dev, flow, error);
+ }
return 0;
}
/**
- * Remove all flows.
- *
- * Called by dev_stop() to remove all flows.
+ * Disable flow rules.
*
* @param priv
* Pointer to private structure.
*/
void
-mlx4_priv_flow_stop(struct priv *priv)
+mlx4_flow_stop(struct priv *priv)
{
struct rte_flow *flow;
for (flow = LIST_FIRST(&priv->flows);
flow;
flow = LIST_NEXT(flow, next)) {
- claim_zero(ibv_destroy_flow(flow->ibv_flow));
- flow->ibv_flow = NULL;
- DEBUG("Flow %p removed", (void *)flow);
+ claim_zero(mlx4_flow_toggle(priv, flow, 0, NULL));
}
mlx4_flow_destroy_drop_queue(priv);
}
/**
- * Add all flows.
+ * Enable flow rules.
*
* @param priv
* Pointer to private structure.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_priv_flow_start(struct priv *priv)
+mlx4_flow_start(struct priv *priv)
{
int ret;
- struct ibv_qp *qp;
struct rte_flow *flow;
ret = mlx4_flow_create_drop_queue(priv);
for (flow = LIST_FIRST(&priv->flows);
flow;
flow = LIST_NEXT(flow, next)) {
- qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
- flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
- if (!flow->ibv_flow) {
- DEBUG("Flow %p cannot be applied", (void *)flow);
- rte_errno = EINVAL;
- return rte_errno;
+ ret = mlx4_flow_toggle(priv, flow, 1, NULL);
+ if (unlikely(ret)) {
+ mlx4_flow_stop(priv);
+ return ret;
}
- DEBUG("Flow %p applied", (void *)flow);
}
return 0;
}
+
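+/** Flow rule operations returned to applications by mlx4_filter_ctrl(). */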
+static const struct rte_flow_ops mlx4_flow_ops = {
+ .validate = mlx4_flow_validate,
+ .create = mlx4_flow_create,
+ .destroy = mlx4_flow_destroy,
+ .flush = mlx4_flow_flush,
+ .isolate = mlx4_flow_isolate,
+};
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ break;
+ *(const void **)arg = &mlx4_flow_ops;
+ return 0;
+ default:
+ ERROR("%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ break;
+ }
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}