net/mlx4: fix flow creation before start
[dpdk.git] / drivers / net / mlx4 / mlx4_flow.c
index 65537c7..925c89c 100644 (file)
@@ -103,10 +103,16 @@ struct mlx4_flow_items {
        const enum rte_flow_item_type *const items;
 };
 
+struct rte_flow_drop {
+       struct ibv_qp *qp; /**< Verbs queue pair. */
+       struct ibv_cq *cq; /**< Verbs completion queue. */
+};
+
 /** Valid action for this PMD. */
 static const enum rte_flow_action_type valid_actions[] = {
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_QUEUE,
+       RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_END,
 };
 
@@ -667,6 +673,76 @@ priv_flow_validate(struct priv *priv,
                        if (!queue || (queue->index > (priv->rxqs_n - 1)))
                                goto exit_action_not_supported;
                        action.queue = 1;
+                       action.queues_n = 1;
+                       action.queues[0] = queue->index;
+               } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+                       int i;
+                       int ierr;
+                       const struct rte_flow_action_rss *rss =
+                               (const struct rte_flow_action_rss *)
+                               actions->conf;
+
+                       if (!priv->hw_rss) {
+                               rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ACTION,
+                                          actions,
+                                          "RSS cannot be used with "
+                                          "the current configuration");
+                               return -rte_errno;
+                       }
+                       if (!priv->isolated) {
+                               rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ACTION,
+                                          actions,
+                                          "RSS cannot be used without "
+                                          "isolated mode");
+                               return -rte_errno;
+                       }
+                       if (!rte_is_power_of_2(rss->num)) {
+                               rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ACTION,
+                                          actions,
+                                          "the number of queues "
+                                          "should be a power of two");
+                               return -rte_errno;
+                       }
+                       if (priv->max_rss_tbl_sz < rss->num) {
+                               rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ACTION,
+                                          actions,
+                                          "the number of queues "
+                                          "is too large");
+                               return -rte_errno;
+                       }
+                       /* Check the queue indices array. */
+                       ierr = 0;
+                       for (i = 0; i < rss->num; ++i) {
+                               int j;
+                               if (rss->queue[i] >= priv->rxqs_n)
+                                       ierr = 1;
+                               /*
+                                * Prevent the user from specifying
+                                * the same queue twice in the RSS array.
+                                */
+                               for (j = i + 1; j < rss->num && !ierr; ++j)
+                                       if (rss->queue[j] == rss->queue[i])
+                                               ierr = 1;
+                               if (ierr) {
+                                       rte_flow_error_set(
+                                               error,
+                                               ENOTSUP,
+                                               RTE_FLOW_ERROR_TYPE_HANDLE,
+                                               NULL,
+                                               "RSS action only supports "
+                                               "unique queue indices "
+                                               "in a list");
+                                       return -rte_errno;
+                               }
+                       }
+                       action.queue = 1;
+                       action.queues_n = rss->num;
+                       for (i = 0; i < rss->num; ++i)
+                               action.queues[i] = rss->queue[i];
                } else {
                        goto exit_action_not_supported;
                }
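
For reference, an application-side RSS action that satisfies these checks (power-of-two queue count, unique indices, isolated mode enabled on a device with HW RSS) could be built roughly as in the sketch below. This is not part of the patch: it assumes the rte_flow_action_rss layout of this DPDK generation (a num count followed by a flexible queue[] array, both referenced above), and the helper name and four-queue set are illustrative only.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <rte_flow.h>

    /* Illustrative sketch (not part of the patch): request RSS over four
     * Rx queues.  The queue count is a power of two and every index is
     * unique, which is what priv_flow_validate() accepts above. */
    static struct rte_flow *
    example_rss_flow(int port_id, struct rte_flow_error *err)
    {
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const uint16_t queues[] = { 0, 1, 2, 3 };
        /* rte_flow_action_rss ends with a flexible queue[] array. */
        struct rte_flow_action_rss *rss =
            calloc(1, sizeof(*rss) + sizeof(queues));
        struct rte_flow *flow;

        if (!rss)
            return NULL;
        rss->num = 4;
        memcpy(rss->queue, queues, sizeof(queues));
        const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        flow = rte_flow_create(port_id, &attr, pattern, actions, err);
        free(rss);
        return flow;
    }
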
@@ -710,6 +786,163 @@ mlx4_flow_validate(struct rte_eth_dev *dev,
        return ret;
 }
 
+/**
+ * Destroy a drop queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+mlx4_flow_destroy_drop_queue(struct priv *priv)
+{
+       if (priv->flow_drop_queue) {
+               struct rte_flow_drop *fdq = priv->flow_drop_queue;
+
+               priv->flow_drop_queue = NULL;
+               claim_zero(ibv_destroy_qp(fdq->qp));
+               claim_zero(ibv_destroy_cq(fdq->cq));
+               rte_free(fdq);
+       }
+}
+
+/**
+ * Create a single drop queue for all drop flows.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   0 on success, negative value otherwise.
+ */
+static int
+mlx4_flow_create_drop_queue(struct priv *priv)
+{
+       struct ibv_qp *qp;
+       struct ibv_cq *cq;
+       struct rte_flow_drop *fdq;
+
+       fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
+       if (!fdq) {
+               ERROR("Cannot allocate memory for drop struct");
+               goto err;
+       }
+       cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+                             &(struct ibv_exp_cq_init_attr){
+                                       .comp_mask = 0,
+                             });
+       if (!cq) {
+               ERROR("Cannot create drop CQ");
+               goto err_create_cq;
+       }
+       qp = ibv_exp_create_qp(priv->ctx,
+                             &(struct ibv_exp_qp_init_attr){
+                                       .send_cq = cq,
+                                       .recv_cq = cq,
+                                       .cap = {
+                                               .max_recv_wr = 1,
+                                               .max_recv_sge = 1,
+                                       },
+                                       .qp_type = IBV_QPT_RAW_PACKET,
+                                       .comp_mask =
+                                               IBV_EXP_QP_INIT_ATTR_PD |
+                                               IBV_EXP_QP_INIT_ATTR_PORT,
+                                       .pd = priv->pd,
+                                       .port_num = priv->port,
+                             });
+       if (!qp) {
+               ERROR("Cannot create drop QP");
+               goto err_create_qp;
+       }
+       *fdq = (struct rte_flow_drop){
+               .qp = qp,
+               .cq = cq,
+       };
+       priv->flow_drop_queue = fdq;
+       return 0;
+err_create_qp:
+       claim_zero(ibv_destroy_cq(cq));
+err_create_cq:
+       rte_free(fdq);
+err:
+       return -1;
+}
+
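
With the shared drop queue above, every drop rule attaches to the same QP instead of carrying its own CQ/QP pair as before. A minimal application-side drop rule that ends up on that QP might look roughly like this sketch (illustrative only, the helper name is made up):

    #include <rte_flow.h>

    /* Illustrative sketch (not part of the patch): drop all IPv4 traffic.
     * With this patch, the resulting Verbs flow is attached to the single
     * drop QP created by mlx4_flow_create_drop_queue(). */
    static struct rte_flow *
    example_drop_flow(int port_id, struct rte_flow_error *err)
    {
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_DROP },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }
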
+/**
+ * Get RSS parent rxq structure for given queues.
+ *
+ * Creates a new one or returns an existing one.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param queues
+ *   Queue indices array, NULL in the default RSS case.
+ * @param children_n
+ *   Number of entries in the queues array.
+ *
+ * @return
+ *   Pointer to a parent rxq structure, NULL on failure.
+ */
+static struct rxq *
+priv_parent_get(struct priv *priv,
+               uint16_t queues[],
+               uint16_t children_n,
+               struct rte_flow_error *error)
+{
+       unsigned int i;
+       struct rxq *parent;
+
+       for (parent = LIST_FIRST(&priv->parents);
+            parent;
+            parent = LIST_NEXT(parent, next)) {
+               unsigned int same = 0;
+               unsigned int overlap = 0;
+
+               /*
+                * Find out whether an appropriate parent queue already exists
+                * and can be reused, otherwise make sure there are no overlaps.
+                */
+               for (i = 0; i < children_n; ++i) {
+                       unsigned int j;
+
+                       for (j = 0; j < parent->rss.queues_n; ++j) {
+                               if (parent->rss.queues[j] != queues[i])
+                                       continue;
+                               ++overlap;
+                               if (i == j)
+                                       ++same;
+                       }
+               }
+               if (same == children_n &&
+                       children_n == parent->rss.queues_n)
+                       return parent;
+               else if (overlap)
+                       goto error;
+       }
+       /* Exclude cases where some QPs were already created without RSS. */
+       for (i = 0; i < children_n; ++i) {
+               struct rxq *rxq = (*priv->rxqs)[queues[i]];
+               if (rxq->qp)
+                       goto error;
+       }
+       parent = priv_parent_create(priv, queues, children_n);
+       if (!parent) {
+               rte_flow_error_set(error,
+                                  ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "flow rule creation failure");
+               return NULL;
+       }
+       return parent;
+
+error:
+       rte_flow_error_set(error,
+                          EEXIST,
+                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                          NULL,
+                          "sharing a queue between several"
+                          " RSS groups is not supported");
+       return NULL;
+}
+
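
The reuse rule implemented by priv_parent_get() boils down to: an existing RSS parent is reused only when the requested queues match it exactly (same indices, same order, same count); any partial overlap is rejected, and disjoint sets get a new parent. A distilled, self-contained sketch of that rule (not driver code, names are illustrative):

    #include <stdint.h>

    /* Returns 1 when an existing parent can be reused, 0 when the sets
     * are disjoint (a new parent may be created), and -1 on partial
     * overlap, which priv_parent_get() rejects with EEXIST. */
    static int
    rss_group_match(const uint16_t *existing, unsigned int existing_n,
                    const uint16_t *wanted, unsigned int wanted_n)
    {
        unsigned int same = 0;
        unsigned int overlap = 0;
        unsigned int i;
        unsigned int j;

        for (i = 0; i < wanted_n; ++i)
            for (j = 0; j < existing_n; ++j)
                if (existing[j] == wanted[i]) {
                    ++overlap;
                    if (i == j)
                        ++same;
                }
        if (same == wanted_n && wanted_n == existing_n)
            return 1;
        return overlap ? -1 : 0;
    }

For example, with an existing parent over {0, 1, 2, 3}, requesting {0, 1, 2, 3} reuses it, {4, 5, 6, 7} creates a new one, and {2, 3, 4, 5} fails with EEXIST.
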
 /**
  * Complete flow rule creation.
  *
@@ -731,9 +964,9 @@ priv_flow_create_action_queue(struct priv *priv,
                              struct mlx4_flow_action *action,
                              struct rte_flow_error *error)
 {
-       struct rxq *rxq;
        struct ibv_qp *qp;
        struct rte_flow *rte_flow;
+       struct rxq *rxq_parent = NULL;
 
        assert(priv->pd);
        assert(priv->ctx);
@@ -743,49 +976,47 @@ priv_flow_create_action_queue(struct priv *priv,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }
-       rxq = (*priv->rxqs)[action->queue_id];
        if (action->drop) {
-               rte_flow->cq =
-                       ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
-                                         &(struct ibv_exp_cq_init_attr){
-                                                 .comp_mask = 0,
-                                         });
-               if (!rte_flow->cq) {
-                       rte_flow_error_set(error, ENOMEM,
-                                          RTE_FLOW_ERROR_TYPE_HANDLE,
-                                          NULL, "cannot allocate CQ");
-                       goto error;
+               qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
+       } else {
+               int ret;
+               unsigned int i;
+               struct rxq *rxq = NULL;
+
+               if (action->queues_n > 1) {
+                       rxq_parent = priv_parent_get(priv, action->queues,
+                                                    action->queues_n, error);
+                       if (!rxq_parent)
+                               goto error;
                }
-               rte_flow->qp = ibv_exp_create_qp(
-                       priv->ctx,
-                       &(struct ibv_exp_qp_init_attr){
-                               .send_cq = rte_flow->cq,
-                               .recv_cq = rte_flow->cq,
-                               .cap = {
-                                       .max_recv_wr = 1,
-                                       .max_recv_sge = 1,
-                               },
-                               .qp_type = IBV_QPT_RAW_PACKET,
-                               .comp_mask =
-                                       IBV_EXP_QP_INIT_ATTR_PD |
-                                       IBV_EXP_QP_INIT_ATTR_PORT |
-                                       IBV_EXP_QP_INIT_ATTR_RES_DOMAIN,
-                               .pd = priv->pd,
-                               .res_domain = rxq->rd,
-                               .port_num = priv->port,
-                       });
-               if (!rte_flow->qp) {
-                       rte_flow_error_set(error, ENOMEM,
-                                          RTE_FLOW_ERROR_TYPE_HANDLE,
-                                          NULL, "cannot allocate QP");
-                       goto error;
+               for (i = 0; i < action->queues_n; ++i) {
+                       rxq = (*priv->rxqs)[action->queues[i]];
+                       /*
+                        * In isolated mode, the Verbs receive queue is
+                        * not created until the first rte_flow rule is
+                        * applied on that queue.
+                        */
+                       if (!rxq->qp) {
+                               assert(priv->isolated);
+                               ret = rxq_create_qp(rxq, rxq->elts_n,
+                                                   0, 0, rxq_parent);
+                               if (ret) {
+                                       rte_flow_error_set(
+                                               error,
+                                               ENOMEM,
+                                               RTE_FLOW_ERROR_TYPE_HANDLE,
+                                               NULL,
+                                               "flow rule creation failure");
+                                       goto error;
+                               }
+                       }
                }
-               qp = rte_flow->qp;
-       } else {
-               rte_flow->rxq = rxq;
-               qp = rxq->qp;
+               qp = action->queues_n > 1 ? rxq_parent->qp : rxq->qp;
+               rte_flow->qp = qp;
        }
        rte_flow->ibv_attr = ibv_attr;
+       if (!priv->started)
+               return rte_flow;
        rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
        if (!rte_flow->ibv_flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -795,12 +1026,8 @@ priv_flow_create_action_queue(struct priv *priv,
        return rte_flow;
 
 error:
-       assert(rte_flow);
-       if (rte_flow->cq)
-               ibv_destroy_cq(rte_flow->cq);
-       if (rte_flow->qp)
-               ibv_destroy_qp(rte_flow->qp);
-       rte_free(rte_flow->ibv_attr);
+       if (rxq_parent)
+               rxq_parent_cleanup(rxq_parent);
        rte_free(rte_flow);
        return NULL;
 }
@@ -864,11 +1091,22 @@ priv_flow_create(struct priv *priv,
                        continue;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                        action.queue = 1;
-                       action.queue_id =
+                       action.queues_n = 1;
+                       action.queues[0] =
                                ((const struct rte_flow_action_queue *)
                                 actions->conf)->index;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
                        action.drop = 1;
+               } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+                       unsigned int i;
+                       const struct rte_flow_action_rss *rss =
+                               (const struct rte_flow_action_rss *)
+                                actions->conf;
+
+                       action.queue = 1;
+                       action.queues_n = rss->num;
+                       for (i = 0; i < rss->num; ++i)
+                               action.queues[i] = rss->queue[i];
                } else {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
@@ -878,7 +1116,8 @@ priv_flow_create(struct priv *priv,
        }
        rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
                                                 &action, error);
-       return rte_flow;
+       if (rte_flow)
+               return rte_flow;
 exit:
        rte_free(flow.ibv_attr);
        return NULL;
@@ -910,6 +1149,43 @@ mlx4_flow_create(struct rte_eth_dev *dev,
        return flow;
 }
 
+/**
+ * @see rte_flow_isolate()
+ *
+ * Must be done before calling dev_configure().
+ *
+ * @param dev
+ *   Pointer to the ethernet device structure.
+ * @param enable
+ *   Nonzero to enter isolated mode, attempt to leave it otherwise.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   0 on success, a negative value on error.
+ */
+int
+mlx4_flow_isolate(struct rte_eth_dev *dev,
+                 int enable,
+                 struct rte_flow_error *error)
+{
+       struct priv *priv = dev->data->dev_private;
+
+       priv_lock(priv);
+       if (priv->rxqs) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "isolated mode must be set"
+                                  " before configuring the device");
+               priv_unlock(priv);
+               return -rte_errno;
+       }
+       priv->isolated = !!enable;
+       priv_unlock(priv);
+       return 0;
+}
+
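
Since rte_flow_isolate() must precede rte_eth_dev_configure() and, with this patch, flow rules may be created before the port is started, a typical initialization order on the application side would look roughly like the following sketch (function name and queue counts are illustrative; error handling trimmed):

    #include <rte_ethdev.h>
    #include <rte_flow.h>

    /* Illustrative call order only (not part of the patch). */
    static int
    example_port_init(int port_id, const struct rte_eth_conf *conf)
    {
        struct rte_flow_error err;

        /* 1. Isolated mode, before any queue exists. */
        if (rte_flow_isolate(port_id, 1, &err))
            return -1;
        /* 2. Configure the device and set up Rx/Tx queues. */
        if (rte_eth_dev_configure(port_id, 4, 1, conf))
            return -1;
        /* ... rte_eth_rx_queue_setup() / rte_eth_tx_queue_setup() ... */
        /* 3. Flow rules (e.g. the RSS sketch earlier) may be created
         *    here; the PMD keeps them in priv->flows and only calls
         *    ibv_create_flow() once the port is started. */
        /* 4. Start the port; mlx4_priv_flow_start() attaches the rules. */
        return rte_eth_dev_start(port_id);
    }
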
 /**
  * Destroy a flow.
  *
@@ -925,10 +1201,6 @@ priv_flow_destroy(struct priv *priv, struct rte_flow *flow)
        LIST_REMOVE(flow, next);
        if (flow->ibv_flow)
                claim_zero(ibv_destroy_flow(flow->ibv_flow));
-       if (flow->qp)
-               claim_zero(ibv_destroy_qp(flow->qp));
-       if (flow->cq)
-               claim_zero(ibv_destroy_cq(flow->cq));
        rte_free(flow->ibv_attr);
        DEBUG("Flow destroyed %p", (void *)flow);
        rte_free(flow);
@@ -1010,6 +1282,7 @@ mlx4_priv_flow_stop(struct priv *priv)
                flow->ibv_flow = NULL;
                DEBUG("Flow %p removed", (void *)flow);
        }
+       mlx4_flow_destroy_drop_queue(priv);
 }
 
 /**
@@ -1024,13 +1297,17 @@ mlx4_priv_flow_stop(struct priv *priv)
 int
 mlx4_priv_flow_start(struct priv *priv)
 {
+       int ret;
        struct ibv_qp *qp;
        struct rte_flow *flow;
 
+       ret = mlx4_flow_create_drop_queue(priv);
+       if (ret)
+               return -1;
        for (flow = LIST_FIRST(&priv->flows);
             flow;
             flow = LIST_NEXT(flow, next)) {
-               qp = flow->qp ? flow->qp : flow->rxq->qp;
+               qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
                flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
                if (!flow->ibv_flow) {
                        DEBUG("Flow %p cannot be applied", (void *)flow);