net/mlx5: change eth device reference for secondary process
[dpdk.git] / drivers / net / mlx5 / mlx5_fdir.c
index 1850218..66e3818 100644 (file)
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include <infiniband/verbs.h>
 #ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
 #endif
 
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
-#endif
 #include <rte_ether.h>
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
 #include <rte_common.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
-#endif
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
@@ -75,8 +70,9 @@ struct fdir_flow_desc {
 struct mlx5_fdir_filter {
        LIST_ENTRY(mlx5_fdir_filter) next;
        uint16_t queue; /* Queue assigned to if FDIR match. */
+       enum rte_eth_fdir_behavior behavior; /* Action on match (accept or reject). */
        struct fdir_flow_desc desc;
-       struct ibv_exp_flow *flow;
+       struct ibv_flow *flow;
 };
 
 LIST_HEAD(fdir_filter_list, mlx5_fdir_filter);
@@ -141,6 +137,7 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
                desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
                desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
+               /* fallthrough */
        case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
                desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
                desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
@@ -241,19 +238,19 @@ priv_fdir_flow_add(struct priv *priv,
                   struct mlx5_fdir_filter *mlx5_fdir_filter,
                   struct fdir_queue *fdir_queue)
 {
-       struct ibv_exp_flow *flow;
+       struct ibv_flow *flow;
        struct fdir_flow_desc *desc = &mlx5_fdir_filter->desc;
        enum rte_fdir_mode fdir_mode =
                priv->dev->data->dev_conf.fdir_conf.mode;
        struct rte_eth_fdir_masks *mask =
                &priv->dev->data->dev_conf.fdir_conf.mask;
        FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type));
-       struct ibv_exp_flow_attr *attr = &data->attr;
+       struct ibv_flow_attr *attr = &data->attr;
        uintptr_t spec_offset = (uintptr_t)&data->spec;
-       struct ibv_exp_flow_spec_eth *spec_eth;
-       struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
-       struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
-       struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
+       struct ibv_flow_spec_eth *spec_eth;
+       struct ibv_flow_spec_ipv4 *spec_ipv4;
+       struct ibv_flow_spec_ipv6 *spec_ipv6;
+       struct ibv_flow_spec_tcp_udp *spec_tcp_udp;
        struct mlx5_fdir_filter *iter_fdir_filter;
        unsigned int i;
 
@@ -275,10 +272,10 @@ priv_fdir_flow_add(struct priv *priv,
        priv_flow_attr(priv, attr, sizeof(data), desc->type);
 
        /* Set Ethernet spec */
-       spec_eth = (struct ibv_exp_flow_spec_eth *)spec_offset;
+       spec_eth = (struct ibv_flow_spec_eth *)spec_offset;
 
        /* The first specification must be Ethernet. */
-       assert(spec_eth->type == IBV_EXP_FLOW_SPEC_ETH);
+       assert(spec_eth->type == IBV_FLOW_SPEC_ETH);
        assert(spec_eth->size == sizeof(*spec_eth));
 
        /* VLAN ID */
@@ -305,10 +302,10 @@ priv_fdir_flow_add(struct priv *priv,
                spec_offset += spec_eth->size;
 
                /* Set IP spec */
-               spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;
+               spec_ipv4 = (struct ibv_flow_spec_ipv4 *)spec_offset;
 
                /* The second specification must be IP. */
-               assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
+               assert(spec_ipv4->type == IBV_FLOW_SPEC_IPV4);
                assert(spec_ipv4->size == sizeof(*spec_ipv4));
 
                spec_ipv4->val.src_ip =
@@ -332,10 +329,10 @@ priv_fdir_flow_add(struct priv *priv,
                spec_offset += spec_eth->size;
 
                /* Set IP spec */
-               spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;
+               spec_ipv6 = (struct ibv_flow_spec_ipv6 *)spec_offset;
 
                /* The second specification must be IP. */
-               assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
+               assert(spec_ipv6->type == IBV_FLOW_SPEC_IPV6);
                assert(spec_ipv6->size == sizeof(*spec_ipv6));
 
                for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
@@ -365,11 +362,11 @@ priv_fdir_flow_add(struct priv *priv,
        }
 
        /* Set TCP/UDP flow specification. */
-       spec_tcp_udp = (struct ibv_exp_flow_spec_tcp_udp *)spec_offset;
+       spec_tcp_udp = (struct ibv_flow_spec_tcp_udp *)spec_offset;
 
        /* The third specification must be TCP/UDP. */
-       assert(spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_TCP ||
-              spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_UDP);
+       assert(spec_tcp_udp->type == IBV_FLOW_SPEC_TCP ||
+              spec_tcp_udp->type == IBV_FLOW_SPEC_UDP);
        assert(spec_tcp_udp->size == sizeof(*spec_tcp_udp));
 
        spec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask;
@@ -383,7 +380,7 @@ priv_fdir_flow_add(struct priv *priv,
 create_flow:
 
        errno = 0;
-       flow = ibv_exp_create_flow(fdir_queue->qp, attr);
+       flow = ibv_create_flow(fdir_queue->qp, attr);
        if (flow == NULL) {
                /* It's not clear whether errno is always set in this case. */
                ERROR("%p: flow director configuration failed, errno=%d: %s",
@@ -399,6 +396,140 @@ create_flow:
        return 0;
 }
 
+/**
+ * Destroy a flow director queue.
+ *
+ * @param priv
+ *   Private structure.
+ * @param fdir_queue
+ *   Flow director queue to be destroyed.
+ */
+void
+priv_fdir_queue_destroy(struct priv *priv, struct fdir_queue *fdir_queue)
+{
+       struct mlx5_fdir_filter *fdir_filter;
+
+       /* Disable filter flows still applying to this queue. */
+       LIST_FOREACH(fdir_filter, priv->fdir_filter_list, next) {
+               unsigned int idx = fdir_filter->queue;
+               struct rxq_ctrl *rxq_ctrl =
+                       container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
+
+               assert(idx < priv->rxqs_n);
+               if (fdir_queue == rxq_ctrl->fdir_queue &&
+                   fdir_filter->flow != NULL) {
+                       claim_zero(ibv_destroy_flow(fdir_filter->flow));
+                       fdir_filter->flow = NULL;
+               }
+       }
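+       /* Release Verbs objects in reverse order of creation; the CQ and
+        * WQ only exist for internally created (drop) queues. */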
+       assert(fdir_queue->qp);
+       claim_zero(ibv_destroy_qp(fdir_queue->qp));
+       assert(fdir_queue->ind_table);
+       claim_zero(ibv_destroy_rwq_ind_table(fdir_queue->ind_table));
+       if (fdir_queue->wq)
+               claim_zero(ibv_destroy_wq(fdir_queue->wq));
+       if (fdir_queue->cq)
+               claim_zero(ibv_destroy_cq(fdir_queue->cq));
+#ifndef NDEBUG
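+       /* Poison the memory to help catch use after free in debug builds. */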
+       memset(fdir_queue, 0x2a, sizeof(*fdir_queue));
+#endif
+       rte_free(fdir_queue);
+}
+
+/**
+ * Create a flow director queue.
+ *
+ * @param priv
+ *   Private structure.
+ * @param wq
+ *   Work queue to route matched packets to, NULL if one needs to
+ *   be created.
+ * @param socket
+ *   NUMA socket to allocate the queue resources on.
+ *
+ * @return
+ *   Related flow director queue on success, NULL otherwise.
+ */
+static struct fdir_queue *
+priv_fdir_queue_create(struct priv *priv, struct ibv_wq *wq,
+                      unsigned int socket)
+{
+       struct fdir_queue *fdir_queue;
+
+       fdir_queue = rte_calloc_socket(__func__, 1, sizeof(*fdir_queue),
+                                      0, socket);
+       if (!fdir_queue) {
+               ERROR("cannot allocate flow director queue");
+               return NULL;
+       }
+       assert(priv->pd);
+       assert(priv->ctx);
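+       /* When no WQ is provided (drop queue), create a placeholder CQ and
+        * WQ; no receive buffers are ever posted on it, so matched packets
+        * are discarded. */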
+       if (!wq) {
+               fdir_queue->cq = ibv_create_cq(
+                       priv->ctx, 1, NULL, NULL, 0);
+               if (!fdir_queue->cq) {
+                       ERROR("cannot create flow director CQ");
+                       goto error;
+               }
+               fdir_queue->wq = ibv_create_wq(
+                       priv->ctx,
+                       &(struct ibv_wq_init_attr){
+                               .wq_type = IBV_WQT_RQ,
+                               .max_wr = 1,
+                               .max_sge = 1,
+                               .pd = priv->pd,
+                               .cq = fdir_queue->cq,
+                       });
+               if (!fdir_queue->wq) {
+                       ERROR("cannot create flow director WQ");
+                       goto error;
+               }
+               wq = fdir_queue->wq;
+       }
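+       /* A hash RX QP can only target WQs through an indirection table;
+        * wrap the single WQ in a one-entry table. */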
+       fdir_queue->ind_table = ibv_create_rwq_ind_table(
+               priv->ctx,
+               &(struct ibv_rwq_ind_table_init_attr){
+                       .log_ind_tbl_size = 0,
+                       .ind_tbl = &wq,
+                       .comp_mask = 0,
+               });
+       if (!fdir_queue->ind_table) {
+               ERROR("cannot create flow director indirection table");
+               goto error;
+       }
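+       /* Create the hash RX QP the flow rules will attach to; hashing is
+        * effectively disabled (empty fields mask) since there is only one
+        * WQ behind it. */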
+       fdir_queue->qp = ibv_create_qp_ex(
+               priv->ctx,
+               &(struct ibv_qp_init_attr_ex){
+                       .qp_type = IBV_QPT_RAW_PACKET,
+                       .comp_mask =
+                               IBV_QP_INIT_ATTR_PD |
+                               IBV_QP_INIT_ATTR_IND_TABLE |
+                               IBV_QP_INIT_ATTR_RX_HASH,
+                       .rx_hash_conf = (struct ibv_rx_hash_conf){
+                               .rx_hash_function =
+                                       IBV_RX_HASH_FUNC_TOEPLITZ,
+                               .rx_hash_key_len = rss_hash_default_key_len,
+                               .rx_hash_key = rss_hash_default_key,
+                               .rx_hash_fields_mask = 0,
+                       },
+                       .rwq_ind_tbl = fdir_queue->ind_table,
+                       .pd = priv->pd,
+               });
+       if (!fdir_queue->qp) {
+               ERROR("cannot create flow director hash RX QP");
+               goto error;
+       }
+       return fdir_queue;
+error:
+       assert(fdir_queue);
+       assert(!fdir_queue->qp);
+       if (fdir_queue->ind_table)
+               claim_zero(ibv_destroy_rwq_ind_table
+                          (fdir_queue->ind_table));
+       if (fdir_queue->wq)
+               claim_zero(ibv_destroy_wq(fdir_queue->wq));
+       if (fdir_queue->cq)
+               claim_zero(ibv_destroy_cq(fdir_queue->cq));
+       rte_free(fdir_queue);
+       return NULL;
+}
+
 /**
  * Get flow director queue for a specific RX queue, create it in case
  * it does not exist.
@@ -416,74 +547,42 @@ priv_get_fdir_queue(struct priv *priv, uint16_t idx)
 {
        struct rxq_ctrl *rxq_ctrl =
                container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
-       struct fdir_queue *fdir_queue = &rxq_ctrl->fdir_queue;
-       struct ibv_exp_rwq_ind_table *ind_table = NULL;
-       struct ibv_qp *qp = NULL;
-       struct ibv_exp_rwq_ind_table_init_attr ind_init_attr;
-       struct ibv_exp_rx_hash_conf hash_conf;
-       struct ibv_exp_qp_init_attr qp_init_attr;
-       int err = 0;
-
-       /* Return immediately if it has already been created. */
-       if (fdir_queue->qp != NULL)
-               return fdir_queue;
-
-       ind_init_attr = (struct ibv_exp_rwq_ind_table_init_attr){
-               .pd = priv->pd,
-               .log_ind_tbl_size = 0,
-               .ind_tbl = &((*priv->rxqs)[idx]->wq),
-               .comp_mask = 0,
-       };
+       struct fdir_queue *fdir_queue = rxq_ctrl->fdir_queue;
 
-       errno = 0;
-       ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
-                                                &ind_init_attr);
-       if (ind_table == NULL) {
-               /* Not clear whether errno is set. */
-               err = (errno ? errno : EINVAL);
-               ERROR("RX indirection table creation failed with error %d: %s",
-                     err, strerror(err));
-               goto error;
-       }
-
-       /* Create fdir_queue qp. */
-       hash_conf = (struct ibv_exp_rx_hash_conf){
-               .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
-               .rx_hash_key_len = rss_hash_default_key_len,
-               .rx_hash_key = rss_hash_default_key,
-               .rx_hash_fields_mask = 0,
-               .rwq_ind_tbl = ind_table,
-       };
-       qp_init_attr = (struct ibv_exp_qp_init_attr){
-               .max_inl_recv = 0, /* Currently not supported. */
-               .qp_type = IBV_QPT_RAW_PACKET,
-               .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
-                             IBV_EXP_QP_INIT_ATTR_RX_HASH),
-               .pd = priv->pd,
-               .rx_hash_conf = &hash_conf,
-               .port_num = priv->port,
-       };
-
-       qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr);
-       if (qp == NULL) {
-               err = (errno ? errno : EINVAL);
-               ERROR("hash RX QP creation failure: %s", strerror(err));
-               goto error;
+       assert(rxq_ctrl->wq);
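+       /* Create the queue on first use, reusing the RX queue's WQ and
+        * NUMA socket. */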
+       if (fdir_queue == NULL) {
+               fdir_queue = priv_fdir_queue_create(priv, rxq_ctrl->wq,
+                                                   rxq_ctrl->socket);
+               rxq_ctrl->fdir_queue = fdir_queue;
        }
-
-       fdir_queue->ind_table = ind_table;
-       fdir_queue->qp = qp;
-
        return fdir_queue;
+}
 
-error:
-       if (qp != NULL)
-               claim_zero(ibv_destroy_qp(qp));
-
-       if (ind_table != NULL)
-               claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
+/**
+ * Get the flow director drop queue. Create it if it does not exist.
+ *
+ * @param priv
+ *   Private structure.
+ *
+ * @return
+ *   Flow director drop queue on success, NULL otherwise.
+ */
+static struct fdir_queue *
+priv_get_fdir_drop_queue(struct priv *priv)
+{
+       struct fdir_queue *fdir_queue = priv->fdir_drop_queue;
 
-       return NULL;
+       if (fdir_queue == NULL) {
+               unsigned int socket = SOCKET_ID_ANY;
+
+               /* Select a known NUMA socket if possible. */
+               if (priv->rxqs_n && (*priv->rxqs)[0])
+                       socket = container_of((*priv->rxqs)[0],
+                                             struct rxq_ctrl, rxq)->socket;
+               fdir_queue = priv_fdir_queue_create(priv, NULL, socket);
+               priv->fdir_drop_queue = fdir_queue;
+       }
+       return fdir_queue;
 }
 
 /**
@@ -508,7 +607,11 @@ priv_fdir_filter_enable(struct priv *priv,
                return 0;
 
        /* Get fdir_queue for specific queue. */
-       fdir_queue = priv_get_fdir_queue(priv, mlx5_fdir_filter->queue);
+       if (mlx5_fdir_filter->behavior == RTE_ETH_FDIR_REJECT)
+               fdir_queue = priv_get_fdir_drop_queue(priv);
+       else
+               fdir_queue = priv_get_fdir_queue(priv,
+                                                mlx5_fdir_filter->queue);
 
        if (fdir_queue == NULL) {
                ERROR("failed to create flow director rxq for queue %d",
@@ -565,13 +668,13 @@ priv_fdir_filter_flush(struct priv *priv)
        struct mlx5_fdir_filter *mlx5_fdir_filter;
 
        while ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) {
-               struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
+               struct ibv_flow *flow = mlx5_fdir_filter->flow;
 
                DEBUG("%p: flushing flow director filter %p",
                      (void *)priv, (void *)mlx5_fdir_filter);
                LIST_REMOVE(mlx5_fdir_filter, next);
                if (flow != NULL)
-                       claim_zero(ibv_exp_destroy_flow(flow));
+                       claim_zero(ibv_destroy_flow(flow));
                rte_free(mlx5_fdir_filter);
        }
 }
@@ -601,11 +704,10 @@ priv_fdir_disable(struct priv *priv)
 {
        unsigned int i;
        struct mlx5_fdir_filter *mlx5_fdir_filter;
-       struct fdir_queue *fdir_queue;
 
        /* Run on every flow director filter and destroy flow handle. */
        LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
-               struct ibv_exp_flow *flow;
+               struct ibv_flow *flow;
 
                /* Only valid elements should be in the list */
                assert(mlx5_fdir_filter != NULL);
@@ -613,28 +715,26 @@ priv_fdir_disable(struct priv *priv)
 
                /* Destroy flow handle */
                if (flow != NULL) {
-                       claim_zero(ibv_exp_destroy_flow(flow));
+                       claim_zero(ibv_destroy_flow(flow));
                        mlx5_fdir_filter->flow = NULL;
                }
        }
 
-       /* Run on every RX queue to destroy related flow director QP and
-        * indirection table. */
+       /* Destroy flow director context in each RX queue. */
        for (i = 0; (i != priv->rxqs_n); i++) {
-               struct rxq_ctrl *rxq_ctrl =
-                       container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
-
-               fdir_queue = &rxq_ctrl->fdir_queue;
-               if (fdir_queue->qp != NULL) {
-                       claim_zero(ibv_destroy_qp(fdir_queue->qp));
-                       fdir_queue->qp = NULL;
-               }
-
-               if (fdir_queue->ind_table != NULL) {
-                       claim_zero(ibv_exp_destroy_rwq_ind_table
-                                  (fdir_queue->ind_table));
-                       fdir_queue->ind_table = NULL;
-               }
+               struct rxq_ctrl *rxq_ctrl;
+
+               if (!(*priv->rxqs)[i])
+                       continue;
+               rxq_ctrl = container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
+               if (!rxq_ctrl->fdir_queue)
+                       continue;
+               priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
+               rxq_ctrl->fdir_queue = NULL;
+       }
+       if (priv->fdir_drop_queue) {
+               priv_fdir_queue_destroy(priv, priv->fdir_drop_queue);
+               priv->fdir_drop_queue = NULL;
        }
 }
 
@@ -736,8 +836,9 @@ priv_fdir_filter_add(struct priv *priv,
                return err;
        }
 
-       /* Set queue. */
+       /* Set action parameters. */
        mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
+       mlx5_fdir_filter->behavior = fdir_filter->action.behavior;
 
        /* Convert to mlx5 filter descriptor. */
        fdir_filter_to_flow_desc(fdir_filter,
@@ -781,7 +882,7 @@ priv_fdir_filter_update(struct priv *priv,
 
        mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
        if (mlx5_fdir_filter != NULL) {
-               struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
+               struct ibv_flow *flow = mlx5_fdir_filter->flow;
                int err = 0;
 
                /* Update queue number. */
@@ -789,7 +890,7 @@ priv_fdir_filter_update(struct priv *priv,
 
                /* Destroy flow handle. */
                if (flow != NULL) {
-                       claim_zero(ibv_exp_destroy_flow(flow));
+                       claim_zero(ibv_destroy_flow(flow));
                        mlx5_fdir_filter->flow = NULL;
                }
                DEBUG("%p: flow director filter %p updated",
@@ -827,14 +928,14 @@ priv_fdir_filter_delete(struct priv *priv,
 
        mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
        if (mlx5_fdir_filter != NULL) {
-               struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
+               struct ibv_flow *flow = mlx5_fdir_filter->flow;
 
                /* Remove element from list. */
                LIST_REMOVE(mlx5_fdir_filter, next);
 
                /* Destroy flow handle. */
                if (flow != NULL) {
-                       claim_zero(ibv_exp_destroy_flow(flow));
+                       claim_zero(ibv_destroy_flow(flow));
                        mlx5_fdir_filter->flow = NULL;
                }
 
@@ -934,6 +1035,15 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
        return ret;
 }
 
+static const struct rte_flow_ops mlx5_flow_ops = {
+       .validate = mlx5_flow_validate,
+       .create = mlx5_flow_create,
+       .destroy = mlx5_flow_destroy,
+       .flush = mlx5_flow_flush,
+       .query = NULL,
+       .isolate = mlx5_flow_isolate,
+};
+
 /**
  * Manage filter operations.
  *
@@ -955,10 +1065,17 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_op filter_op,
                     void *arg)
 {
-       int ret = -EINVAL;
+       int ret = EINVAL;
        struct priv *priv = dev->data->dev_private;
 
+       if (mlx5_is_secondary())
+               return -E_RTE_SECONDARY;
        switch (filter_type) {
+       case RTE_ETH_FILTER_GENERIC:
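+               /* rte_flow API: only the GET operation is supported, it
+                * returns a pointer to the flow operations. */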
+               if (filter_op != RTE_ETH_FILTER_GET)
+                       return -EINVAL;
+               *(const void **)arg = &mlx5_flow_ops;
+               return 0;
        case RTE_ETH_FILTER_FDIR:
                priv_lock(priv);
                ret = priv_fdir_ctrl_func(priv, filter_op, arg);
@@ -970,5 +1087,5 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
                break;
        }
 
-       return ret;
+       return -ret;
 }