net/mlx5: handle drop queues as regular queues
author     Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
           Thu, 12 Jul 2018 09:30:48 +0000 (11:30 +0200)
committer  Shahaf Shuler <shahafs@mellanox.com>
           Thu, 12 Jul 2018 10:10:01 +0000 (12:10 +0200)
Drop queues only exist in flows because of the Verbs API; whether a
flow's fate is to drop packets is already recorded in the flow itself.
Drop queues can therefore be fully mapped onto regular queues.
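
The mapping is illustrated by the minimal standalone sketch below (all
names are hypothetical, none of this is driver code): once the flow
records its fate, a drop fate resolves to the same queue-handle type as
a regular fate, so no dedicated drop path is needed.

/*
 * Illustrative sketch only -- every identifier here is hypothetical,
 * not the mlx5 driver's API. Both fates resolve to the same queue type.
 */
#include <stdio.h>

enum flow_fate { FATE_QUEUE, FATE_DROP };

struct queue { const char *name; };

static struct queue regular_queue = { "rxq[0]" };
static struct queue drop_queue = { "drop-hrxq" };	/* shared singleton */

struct flow {
	enum flow_fate fate;
	struct queue *q;	/* set from the fate, same type either way */
};

static void flow_attach(struct flow *f)
{
	/* The fate decides which queue backs the flow; no special case. */
	f->q = (f->fate == FATE_DROP) ? &drop_queue : &regular_queue;
}

int main(void)
{
	struct flow f = { .fate = FATE_DROP };

	flow_attach(&f);
	printf("flow backed by %s\n", f.q->name);
	return 0;
}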

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.h

index 1b393c3..60305b1 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -261,7 +261,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                priv->txqs_n = 0;
                priv->txqs = NULL;
        }
-       mlx5_flow_delete_drop_queue(dev);
        mlx5_mprq_free_mp(dev);
        mlx5_mr_release(dev);
        if (priv->pd != NULL) {
@@ -1139,22 +1138,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        mlx5_link_update(eth_dev, 0);
        /* Store device configuration on private structure. */
        priv->config = config;
-       /* Create drop queue. */
-       err = mlx5_flow_create_drop_queue(eth_dev);
-       if (err) {
-               DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
-                       eth_dev->data->port_id, strerror(rte_errno));
-               err = rte_errno;
-               goto error;
-       }
        /* Supported Verbs flow priority number detection. */
-       if (verb_priorities == 0)
-               verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
-       if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
-               DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
-                       eth_dev->data->port_id, verb_priorities);
-               err = ENOTSUP;
-               goto error;
+       if (verb_priorities == 0) {
+               err = mlx5_verbs_max_prio(eth_dev);
+               if (err < 0) {
+                       DRV_LOG(ERR, "port %u wrong Verbs flow priorities",
+                               eth_dev->data->port_id);
+                       goto error;
+               }
+               verb_priorities = err;
        }
        priv->config.max_verbs_prio = verb_priorities;
        /*
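
For clarity: the hunk above switches priority detection to a
negative-errno return convention, where mlx5_verbs_max_prio() returns
the detected count on success or a negative errno value with rte_errno
set. A minimal standalone sketch of that caller pattern, with a
hypothetical probe_max_prio() standing in for the real probe:

#include <errno.h>
#include <stdio.h>

/* Hypothetical probe following the same convention as
 * mlx5_verbs_max_prio(): >= 0 is the detected count, < 0 is -errno. */
static int probe_max_prio(int hw_supported)
{
	if (!hw_supported)
		return -ENOTSUP;
	return 16;	/* pretend the hardware reports 16 priorities */
}

int main(void)
{
	int err = probe_max_prio(1);
	unsigned int verb_priorities;

	if (err < 0) {
		fprintf(stderr, "probe failed: %d\n", err);
		return 1;
	}
	verb_priorities = err;	/* success path: the value is the count */
	printf("max priorities: %u\n", verb_priorities);
	return 0;
}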
index cc01310..2274298 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -139,9 +139,6 @@ enum mlx5_verbs_alloc_type {
        MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
 };
 
-/* 8 Verbs priorities. */
-#define MLX5_VERBS_FLOW_PRIO_8 8
-
 /**
  * Verbs allocator needs a context to know in the callback which kind of
  * resources it is allocating.
@@ -153,6 +150,12 @@ struct mlx5_verbs_alloc_ctx {
 
 LIST_HEAD(mlx5_mr_list, mlx5_mr);
 
+/* Flow drop context necessary due to Verbs API. */
+struct mlx5_drop {
+       struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
+       struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
+};
+
 struct priv {
        LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
        struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
@@ -182,7 +185,7 @@ struct priv {
        struct rte_intr_handle intr_handle; /* Interrupt handler. */
        unsigned int (*reta_idx)[]; /* RETA index table. */
        unsigned int reta_idx_n; /* RETA index size. */
-       struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
+       struct mlx5_drop drop_queue; /* Flow drop queues. */
        struct mlx5_flows flows; /* RTE Flow rules. */
        struct mlx5_flows ctrl_flows; /* Control flow rules. */
        struct {
@@ -314,7 +317,8 @@ int mlx5_traffic_restart(struct rte_eth_dev *dev);
 
 /* mlx5_flow.c */
 
-unsigned int mlx5_get_max_verbs_prio(struct rte_eth_dev *dev);
+int mlx5_verbs_max_prio(struct rte_eth_dev *dev);
+void mlx5_flow_print(struct rte_flow *flow);
 int mlx5_flow_validate(struct rte_eth_dev *dev,
                       const struct rte_flow_attr *attr,
                       const struct rte_flow_item items[],
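
The new struct mlx5_drop caches the drop hash Rx queue and its Verbs Rx
queue so they are created on first use and shared afterwards. A reduced
sketch of that lazy-creation pattern (hypothetical names, no Verbs
calls):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the Verbs object cached in struct mlx5_drop. */
struct hrxq { int dummy; };

struct drop_ctx {
	struct hrxq *hrxq;	/* created on first use, then reused */
};

static struct hrxq *drop_hrxq_get(struct drop_ctx *ctx)
{
	if (ctx->hrxq)		/* already created: reuse it */
		return ctx->hrxq;
	ctx->hrxq = calloc(1, sizeof(*ctx->hrxq));
	return ctx->hrxq;	/* NULL on allocation failure */
}

int main(void)
{
	struct drop_ctx ctx = { 0 };
	struct hrxq *a = drop_hrxq_get(&ctx);
	struct hrxq *b = drop_hrxq_get(&ctx);

	printf("same object: %s\n", a == b ? "yes" : "no");
	free(ctx.hrxq);
	return 0;
}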
index a45cb06..5e325be 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -75,6 +75,58 @@ struct ibv_spec_header {
        uint16_t size;
 };
 
+/**
+ * Get the maximum number of priorities available.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   Number of supported Verbs flow priorities on success, a negative errno
+ *   value otherwise and rte_errno is set.
+ */
+int
+mlx5_verbs_max_prio(struct rte_eth_dev *dev)
+{
+       struct {
+               struct ibv_flow_attr attr;
+               struct ibv_flow_spec_eth eth;
+               struct ibv_flow_spec_action_drop drop;
+       } flow_attr = {
+               .attr = {
+                       .num_of_specs = 2,
+               },
+               .eth = {
+                       .type = IBV_FLOW_SPEC_ETH,
+                       .size = sizeof(struct ibv_flow_spec_eth),
+               },
+               .drop = {
+                       .size = sizeof(struct ibv_flow_spec_action_drop),
+                       .type = IBV_FLOW_SPEC_ACTION_DROP,
+               },
+       };
+       struct ibv_flow *flow;
+       uint32_t verb_priorities;
+       struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
+
+       if (!drop) {
+               rte_errno = ENOTSUP;
+               return -rte_errno;
+       }
+       for (verb_priorities = 0; 1; verb_priorities++) {
+               flow_attr.attr.priority = verb_priorities;
+               flow = mlx5_glue->create_flow(drop->qp,
+                                             &flow_attr.attr);
+               if (!flow)
+                       break;
+               claim_zero(mlx5_glue->destroy_flow(flow));
+       }
+       mlx5_hrxq_drop_release(dev);
+       DRV_LOG(INFO, "port %u flow maximum priority: %u",
+               dev->data->port_id, verb_priorities);
+       return verb_priorities;
+}
+
 /**
  * Convert a flow.
  *
@@ -184,32 +236,6 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
        }
 }
 
-/**
- * Create drop queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_flow_create_drop_queue(struct rte_eth_dev *dev __rte_unused)
-{
-       return 0;
-}
-
-/**
- * Delete drop queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- */
-void
-mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev __rte_unused)
-{
-}
-
 /**
  * Remove all flows.
  *
@@ -292,6 +318,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        struct priv *priv = dev->data->dev_private;
        const struct rte_flow_attr attr = {
                .ingress = 1,
+               .priority = priv->config.max_verbs_prio - 1,
        };
        struct rte_flow_item items[] = {
                {
@@ -830,18 +857,3 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
        }
        return 0;
 }
-
-/**
- * Detect number of Verbs flow priorities supported.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   number of supported Verbs flow priority.
- */
-unsigned int
-mlx5_get_max_verbs_prio(struct rte_eth_dev *dev __rte_unused)
-{
-       return 8;
-}
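
mlx5_verbs_max_prio() above replaces the old hard-coded value of 8 by
probing: it creates a drop flow at increasing priorities until creation
fails, and the loop counter at that point is the number of usable
priorities. A standalone sketch of the technique, with a fake
create_flow() standing in for mlx5_glue->create_flow():

#include <stdbool.h>
#include <stdio.h>

#define HW_MAX_PRIO 16	/* what the fake "hardware" supports */

/* Hypothetical stand-in for the Verbs flow creation call: succeeds only
 * for priorities the device supports. */
static bool create_flow(unsigned int priority)
{
	return priority < HW_MAX_PRIO;
}

int main(void)
{
	unsigned int prio;

	/* Probe upward until creation fails; the counter is then the
	 * number of usable priorities, as in mlx5_verbs_max_prio(). */
	for (prio = 0; ; prio++) {
		if (!create_flow(prio))
			break;
		/* a real probe would destroy the flow here */
	}
	printf("flow maximum priority: %u\n", prio);
	return 0;
}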
index fd0df17..d960daa 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1957,3 +1957,235 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
        }
        return ret;
 }
+
+/**
+ * Create a drop Rx queue Verbs object.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct ibv_cq *cq;
+       struct ibv_wq *wq = NULL;
+       struct mlx5_rxq_ibv *rxq;
+
+       if (priv->drop_queue.rxq)
+               return priv->drop_queue.rxq;
+       cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+       if (!cq) {
+               DEBUG("port %u cannot allocate CQ for drop queue",
+                     dev->data->port_id);
+               rte_errno = errno;
+               goto error;
+       }
+       wq = mlx5_glue->create_wq(priv->ctx,
+                &(struct ibv_wq_init_attr){
+                       .wq_type = IBV_WQT_RQ,
+                       .max_wr = 1,
+                       .max_sge = 1,
+                       .pd = priv->pd,
+                       .cq = cq,
+                });
+       if (!wq) {
+               DEBUG("port %u cannot allocate WQ for drop queue",
+                     dev->data->port_id);
+               rte_errno = errno;
+               goto error;
+       }
+       rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+       if (!rxq) {
+               DEBUG("port %u cannot allocate drop Rx queue memory",
+                     dev->data->port_id);
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       rxq->cq = cq;
+       rxq->wq = wq;
+       priv->drop_queue.rxq = rxq;
+       return rxq;
+error:
+       if (wq)
+               claim_zero(mlx5_glue->destroy_wq(wq));
+       if (cq)
+               claim_zero(mlx5_glue->destroy_cq(cq));
+       return NULL;
+}
+
+/**
+ * Release a drop Rx queue Verbs object.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
+
+       if (rxq->wq)
+               claim_zero(mlx5_glue->destroy_wq(rxq->wq));
+       if (rxq->cq)
+               claim_zero(mlx5_glue->destroy_cq(rxq->cq));
+       rte_free(rxq);
+       priv->drop_queue.rxq = NULL;
+}
+
+/**
+ * Create a drop indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct mlx5_ind_table_ibv *ind_tbl;
+       struct mlx5_rxq_ibv *rxq;
+       struct mlx5_ind_table_ibv tmpl;
+
+       rxq = mlx5_rxq_ibv_drop_new(dev);
+       if (!rxq)
+               return NULL;
+       tmpl.ind_table = mlx5_glue->create_rwq_ind_table
+               (priv->ctx,
+                &(struct ibv_rwq_ind_table_init_attr){
+                       .log_ind_tbl_size = 0,
+                       .ind_tbl = &rxq->wq,
+                       .comp_mask = 0,
+                });
+       if (!tmpl.ind_table) {
+               DEBUG("port %u cannot allocate indirection table for drop"
+                     " queue",
+                     dev->data->port_id);
+               rte_errno = errno;
+               goto error;
+       }
+       ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+       if (!ind_tbl) {
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       ind_tbl->ind_table = tmpl.ind_table;
+       return ind_tbl;
+error:
+       mlx5_rxq_ibv_drop_release(dev);
+       return NULL;
+}
+
+/**
+ * Release a drop indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+
+       claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
+       mlx5_rxq_ibv_drop_release(dev);
+       rte_free(ind_tbl);
+       priv->drop_queue.hrxq->ind_table = NULL;
+}
+
+/**
+ * Create a drop Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct mlx5_ind_table_ibv *ind_tbl;
+       struct ibv_qp *qp;
+       struct mlx5_hrxq *hrxq;
+
+       if (priv->drop_queue.hrxq) {
+               rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+               return priv->drop_queue.hrxq;
+       }
+       ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+       if (!ind_tbl)
+               return NULL;
+       qp = mlx5_glue->create_qp_ex(priv->ctx,
+                &(struct ibv_qp_init_attr_ex){
+                       .qp_type = IBV_QPT_RAW_PACKET,
+                       .comp_mask =
+                               IBV_QP_INIT_ATTR_PD |
+                               IBV_QP_INIT_ATTR_IND_TABLE |
+                               IBV_QP_INIT_ATTR_RX_HASH,
+                       .rx_hash_conf = (struct ibv_rx_hash_conf){
+                               .rx_hash_function =
+                                       IBV_RX_HASH_FUNC_TOEPLITZ,
+                               .rx_hash_key_len = rss_hash_default_key_len,
+                               .rx_hash_key = rss_hash_default_key,
+                               .rx_hash_fields_mask = 0,
+                               },
+                       .rwq_ind_tbl = ind_tbl->ind_table,
+                       .pd = priv->pd
+                });
+       if (!qp) {
+               DEBUG("port %u cannot allocate QP for drop queue",
+                     dev->data->port_id);
+               rte_errno = errno;
+               goto error;
+       }
+       hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+       if (!hrxq) {
+               DRV_LOG(WARNING,
+                       "port %u cannot allocate memory for drop queue",
+                       dev->data->port_id);
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       hrxq->ind_table = ind_tbl;
+       hrxq->qp = qp;
+       priv->drop_queue.hrxq = hrxq;
+       rte_atomic32_set(&hrxq->refcnt, 1);
+       return hrxq;
+error:
+       if (ind_tbl)
+               mlx5_ind_table_ibv_drop_release(dev);
+       return NULL;
+}
+
+/**
+ * Release a drop hash Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+
+       if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+               claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+               mlx5_ind_table_ibv_drop_release(dev);
+               rte_free(hrxq);
+               priv->drop_queue.hrxq = NULL;
+       }
+}
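
mlx5_hrxq_drop_new()/mlx5_hrxq_drop_release() above hand out a single
shared object guarded by a reference count (the driver uses rte_atomic32
operations; the sketch below is a single-threaded reduction with
hypothetical names):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical reduction of the drop hash Rx queue lifetime: one shared
 * object handed out with a reference count. */
struct hrxq { int refcnt; };

static struct hrxq *drop;

static struct hrxq *hrxq_drop_new(void)
{
	if (drop) {
		drop->refcnt++;		/* existing object: take a reference */
		return drop;
	}
	drop = calloc(1, sizeof(*drop));
	if (drop)
		drop->refcnt = 1;	/* first user holds the only reference */
	return drop;
}

static void hrxq_drop_release(void)
{
	if (--drop->refcnt == 0) {	/* last user tears everything down */
		free(drop);
		drop = NULL;
	}
}

int main(void)
{
	hrxq_drop_new();	/* first flow with a drop fate */
	hrxq_drop_new();	/* second flow shares the same object */
	hrxq_drop_release();
	hrxq_drop_release();	/* object freed here */
	printf("drop hrxq released: %s\n", drop == NULL ? "yes" : "no");
	return 0;
}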
index 97b4d9e..708fdd4 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -244,6 +244,8 @@ struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
 int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev);
 int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
                                   uint16_t desc, unsigned int socket,
@@ -264,6 +266,8 @@ struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
 int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
                               struct mlx5_ind_table_ibv *ind_tbl);
 int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev);
 struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
                                const uint8_t *rss_key, uint32_t rss_key_len,
                                uint64_t hash_fields,
@@ -276,6 +280,8 @@ struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
                                uint32_t tunnel, uint32_t rss_level);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
 int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
+void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
 uint64_t mlx5_get_rx_port_offloads(void);
 uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);