/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
#endif
/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
#ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
#endif
#include "mlx5.h"
struct mlx5_fdir_filter {
	LIST_ENTRY(mlx5_fdir_filter) next;
	uint16_t queue; /* Queue assigned to if FDIR match. */
+	enum rte_eth_fdir_behavior behavior; /* Accept or reject filter. */
	struct fdir_flow_desc desc;
	struct ibv_exp_flow *flow;
};
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
desc->type = HASH_RXQ_IPV4;
break;
-#ifdef HAVE_FLOW_SPEC_IPV6
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
desc->type = HASH_RXQ_UDPV6;
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
desc->type = HASH_RXQ_IPV6;
break;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
break;
}
desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
break;
-#ifdef HAVE_FLOW_SPEC_IPV6
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
fdir_filter->input.flow.ipv6_flow.dst_ip,
sizeof(desc->dst_ip));
break;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
break;
}
(desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
return 0;
break;
-#ifdef HAVE_FLOW_SPEC_IPV6
case HASH_RXQ_IPV6:
case HASH_RXQ_UDPV6:
case HASH_RXQ_TCPV6:
(desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
return 0;
break;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
break;
}
uintptr_t spec_offset = (uintptr_t)&data->spec;
struct ibv_exp_flow_spec_eth *spec_eth;
struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
-#ifdef HAVE_FLOW_SPEC_IPV6
struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
struct mlx5_fdir_filter *iter_fdir_filter;
unsigned int i;
spec_offset += spec_ipv4->size;
break;
-#ifdef HAVE_FLOW_SPEC_IPV6
case HASH_RXQ_IPV6:
case HASH_RXQ_UDPV6:
case HASH_RXQ_TCPV6:
spec_offset += spec_ipv6->size;
break;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
ERROR("invalid flow attribute type");
return EINVAL;
}
/**
- * Get flow director queue for a specific RX queue, create it in case
- * it does not exist.
+ * Destroy a flow director queue.
+ *
+ * @param priv
+ *   Private structure.
+ * @param fdir_queue
+ *   Flow director queue to be destroyed.
+ */
+void
+priv_fdir_queue_destroy(struct priv *priv, struct fdir_queue *fdir_queue)
+{
+	struct mlx5_fdir_filter *fdir_filter;
+
+	/* Disable filter flows still applying to this queue. */
+	LIST_FOREACH(fdir_filter, priv->fdir_filter_list, next) {
+		unsigned int idx = fdir_filter->queue;
+		struct rxq_ctrl *rxq_ctrl =
+			container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
+
+		assert(idx < priv->rxqs_n);
+		if (fdir_queue == rxq_ctrl->fdir_queue &&
+		    fdir_filter->flow != NULL) {
+			claim_zero(ibv_exp_destroy_flow(fdir_filter->flow));
+			fdir_filter->flow = NULL;
+		}
+	}
+	/* QP and indirection table are always set by queue creation. */
+	assert(fdir_queue->qp);
+	claim_zero(ibv_destroy_qp(fdir_queue->qp));
+	assert(fdir_queue->ind_table);
+	claim_zero(ibv_exp_destroy_rwq_ind_table(fdir_queue->ind_table));
+	/* WQ and CQ exist only when this queue owns them (drop queue). */
+	if (fdir_queue->wq)
+		claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
+	if (fdir_queue->cq)
+		claim_zero(ibv_destroy_cq(fdir_queue->cq));
+#ifndef NDEBUG
+	/* Poison memory in debug builds to catch use-after-free. */
+	memset(fdir_queue, 0x2a, sizeof(*fdir_queue));
+#endif
+	rte_free(fdir_queue);
+}
+
+/**
+ * Create a flow director queue.
*
* @param priv
* Private structure.
- * @param idx
- * RX queue index.
+ * @param wq
+ *   Work queue to route matched packets to, NULL if one needs to
+ *   be created.
+ * @param socket
+ *   Memory socket used for allocation, only relevant when @p wq is NULL.
*
* @return
* Related flow director queue on success, NULL otherwise.
*/
static struct fdir_queue *
-priv_get_fdir_queue(struct priv *priv, uint16_t idx)
+priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,
+		       unsigned int socket)
{
-	struct fdir_queue *fdir_queue = &(*priv->rxqs)[idx]->fdir_queue;
-	struct ibv_exp_rwq_ind_table *ind_table = NULL;
-	struct ibv_qp *qp = NULL;
-	struct ibv_exp_rwq_ind_table_init_attr ind_init_attr;
-	struct ibv_exp_rx_hash_conf hash_conf;
-	struct ibv_exp_qp_init_attr qp_init_attr;
-	int err = 0;
-
-	/* Return immediately if it has already been created. */
-	if (fdir_queue->qp != NULL)
-		return fdir_queue;
-
-	ind_init_attr = (struct ibv_exp_rwq_ind_table_init_attr){
-		.pd = priv->pd,
-		.log_ind_tbl_size = 0,
-		.ind_tbl = &((*priv->rxqs)[idx]->wq),
-		.comp_mask = 0,
-	};
+	struct fdir_queue *fdir_queue;
-	errno = 0;
-	ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
-						 &ind_init_attr);
-	if (ind_table == NULL) {
-		/* Not clear whether errno is set. */
-		err = (errno ? errno : EINVAL);
-		ERROR("RX indirection table creation failed with error %d: %s",
-		      err, strerror(err));
+	fdir_queue = rte_calloc_socket(__func__, 1, sizeof(*fdir_queue),
+				       0, socket);
+	if (!fdir_queue) {
+		ERROR("cannot allocate flow director queue");
+		return NULL;
+	}
+	assert(priv->pd);
+	assert(priv->ctx);
+	/* No WQ supplied: create a private CQ/WQ pair (drop queue case). */
+	if (!wq) {
+		fdir_queue->cq = ibv_exp_create_cq(
+			priv->ctx, 1, NULL, NULL, 0,
+			&(struct ibv_exp_cq_init_attr){
+				.comp_mask = 0,
+			});
+		if (!fdir_queue->cq) {
+			ERROR("cannot create flow director CQ");
+			goto error;
+		}
+		fdir_queue->wq = ibv_exp_create_wq(
+			priv->ctx,
+			&(struct ibv_exp_wq_init_attr){
+				.wq_type = IBV_EXP_WQT_RQ,
+				.max_recv_wr = 1,
+				.max_recv_sge = 1,
+				.pd = priv->pd,
+				.cq = fdir_queue->cq,
+			});
+		if (!fdir_queue->wq) {
+			ERROR("cannot create flow director WQ");
+			goto error;
+		}
+		wq = fdir_queue->wq;
+	}
+	/* Single-entry indirection table pointing at the target WQ. */
+	fdir_queue->ind_table = ibv_exp_create_rwq_ind_table(
+		priv->ctx,
+		&(struct ibv_exp_rwq_ind_table_init_attr){
+			.pd = priv->pd,
+			.log_ind_tbl_size = 0,
+			.ind_tbl = &wq,
+			.comp_mask = 0,
+		});
+	if (!fdir_queue->ind_table) {
+		ERROR("cannot create flow director indirection table");
		goto error;
	}
-
-	/* Create fdir_queue qp. */
-	hash_conf = (struct ibv_exp_rx_hash_conf){
-		.rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
-		.rx_hash_key_len = rss_hash_default_key_len,
-		.rx_hash_key = rss_hash_default_key,
-		.rx_hash_fields_mask = 0,
-		.rwq_ind_tbl = ind_table,
-	};
-	qp_init_attr = (struct ibv_exp_qp_init_attr){
-		.max_inl_recv = 0, /* Currently not supported. */
-		.qp_type = IBV_QPT_RAW_PACKET,
-		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
-			      IBV_EXP_QP_INIT_ATTR_RX_HASH),
-		.pd = priv->pd,
-		.rx_hash_conf = &hash_conf,
-		.port_num = priv->port,
-	};
-
-	qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr);
-	if (qp == NULL) {
-		err = (errno ? errno : EINVAL);
-		ERROR("hash RX QP creation failure: %s", strerror(err));
+	fdir_queue->qp = ibv_exp_create_qp(
+		priv->ctx,
+		&(struct ibv_exp_qp_init_attr){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_EXP_QP_INIT_ATTR_PD |
+				IBV_EXP_QP_INIT_ATTR_PORT |
+				IBV_EXP_QP_INIT_ATTR_RX_HASH,
+			.pd = priv->pd,
+			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+				.rx_hash_function =
+					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key = rss_hash_default_key,
+				.rx_hash_fields_mask = 0,
+				.rwq_ind_tbl = fdir_queue->ind_table,
+			},
+			.port_num = priv->port,
+		});
+	if (!fdir_queue->qp) {
+		ERROR("cannot create flow director hash RX QP");
		goto error;
	}
+	return fdir_queue;
+error:
+	/* Cleanup in reverse creation order; QP cannot be set here. */
+	assert(fdir_queue);
+	assert(!fdir_queue->qp);
+	if (fdir_queue->ind_table)
+		claim_zero(ibv_exp_destroy_rwq_ind_table
+			   (fdir_queue->ind_table));
+	if (fdir_queue->wq)
+		claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
+	if (fdir_queue->cq)
+		claim_zero(ibv_destroy_cq(fdir_queue->cq));
+	rte_free(fdir_queue);
+	return NULL;
+}
- fdir_queue->ind_table = ind_table;
- fdir_queue->qp = qp;
+/**
+ * Get flow director queue for a specific RX queue, create it in case
+ * it does not exist.
+ *
+ * @param priv
+ *   Private structure.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   Related flow director queue on success, NULL otherwise.
+ */
+static struct fdir_queue *
+priv_get_fdir_queue(struct priv *priv, uint16_t idx)
+{
+	struct rxq_ctrl *rxq_ctrl =
+		container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
+	struct fdir_queue *fdir_queue = rxq_ctrl->fdir_queue;
+	assert(rxq_ctrl->wq);
+	/* Lazily create and cache it in the RX queue control structure. */
+	if (fdir_queue == NULL) {
+		fdir_queue = priv_fdir_queue_create(priv, rxq_ctrl->wq,
+						    rxq_ctrl->socket);
+		rxq_ctrl->fdir_queue = fdir_queue;
+	}
+	return fdir_queue;
+}
-error:
- if (qp != NULL)
- claim_zero(ibv_destroy_qp(qp));
-
- if (ind_table != NULL)
- claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
+/**
+ * Get flow director drop queue. Create it if it does not exist.
+ *
+ * @param priv
+ *   Private structure.
+ *
+ * @return
+ *   Flow director drop queue on success, NULL otherwise.
+ */
+static struct fdir_queue *
+priv_get_fdir_drop_queue(struct priv *priv)
+{
+	struct fdir_queue *fdir_queue = priv->fdir_drop_queue;
-	return NULL;
+	if (fdir_queue == NULL) {
+		unsigned int socket = SOCKET_ID_ANY;
+
+		/* Select a known NUMA socket if possible. */
+		if (priv->rxqs_n && (*priv->rxqs)[0])
+			socket = container_of((*priv->rxqs)[0],
+					      struct rxq_ctrl, rxq)->socket;
+		/* NULL WQ: the drop queue owns a dedicated CQ/WQ pair. */
+		fdir_queue = priv_fdir_queue_create(priv, NULL, socket);
+		priv->fdir_drop_queue = fdir_queue;
+	}
+	return fdir_queue;
}
/**
return 0;
/* Get fdir_queue for specific queue. */
- fdir_queue = priv_get_fdir_queue(priv, mlx5_fdir_filter->queue);
+ if (mlx5_fdir_filter->behavior == RTE_ETH_FDIR_REJECT)
+ fdir_queue = priv_get_fdir_drop_queue(priv);
+ else
+ fdir_queue = priv_get_fdir_queue(priv,
+ mlx5_fdir_filter->queue);
if (fdir_queue == NULL) {
ERROR("failed to create flow director rxq for queue %d",
{
unsigned int i;
struct mlx5_fdir_filter *mlx5_fdir_filter;
- struct fdir_queue *fdir_queue;
/* Run on every flow director filter and destroy flow handle. */
LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
}
}
- /* Run on every RX queue to destroy related flow director QP and
- * indirection table. */
+ /* Destroy flow director context in each RX queue. */
for (i = 0; (i != priv->rxqs_n); i++) {
- fdir_queue = &(*priv->rxqs)[i]->fdir_queue;
-
- if (fdir_queue->qp != NULL) {
- claim_zero(ibv_destroy_qp(fdir_queue->qp));
- fdir_queue->qp = NULL;
- }
+ struct rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
- if (fdir_queue->ind_table != NULL) {
- claim_zero(ibv_exp_destroy_rwq_ind_table
- (fdir_queue->ind_table));
- fdir_queue->ind_table = NULL;
- }
+ if (!rxq_ctrl->fdir_queue)
+ continue;
+ priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
+ rxq_ctrl->fdir_queue = NULL;
+ }
+ if (priv->fdir_drop_queue) {
+ priv_fdir_queue_destroy(priv, priv->fdir_drop_queue);
+ priv->fdir_drop_queue = NULL;
}
}
return err;
}
- /* Set queue. */
+ /* Set action parameters. */
mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
+ mlx5_fdir_filter->behavior = fdir_filter->action.behavior;
/* Convert to mlx5 filter descriptor. */
fdir_filter_to_flow_desc(fdir_filter,
return ret;
}
+/* rte_flow callbacks exposed through RTE_ETH_FILTER_GENERIC. */
+static const struct rte_flow_ops mlx5_flow_ops = {
+	.validate = mlx5_flow_validate,
+	.create = mlx5_flow_create,
+	.destroy = mlx5_flow_destroy,
+	.flush = mlx5_flow_flush,
+	.query = NULL, /* Not implemented. */
+};
+
/**
* Manage filter operations.
*
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = EINVAL;
struct priv *priv = dev->data->dev_private;
switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mlx5_flow_ops;
+ return 0;
case RTE_ETH_FILTER_FDIR:
priv_lock(priv);
ret = priv_fdir_ctrl_func(priv, filter_op, arg);
break;
}
- return ret;
+ return -ret;
}