net/mlx5: add hardware flow debug dump
author Xueming Li <xuemingl@mellanox.com>
Mon, 23 Apr 2018 12:33:07 +0000 (20:33 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 27 Apr 2018 17:00:56 +0000 (18:00 +0100)
Dump Verbs flow details, including the flow spec type and size, for
debugging purposes.

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_utils.h
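For reference, the dump walks the variable-length array of flow specifications
that Verbs places immediately after struct ibv_flow_attr: every spec begins
with a common header holding its type and size, and the size field is what
advances the cursor to the next spec. A minimal, self-contained sketch of that
walk (dump_specs is an illustrative name, not part of the patch; only
libibverbs types are assumed):

    #include <stdio.h>
    #include <stdint.h>
    #include <infiniband/verbs.h>

    /* Illustrative only: print "type(size)" for each spec trailing an attr. */
    static void
    dump_specs(const struct ibv_flow_attr *attr)
    {
            uintptr_t cur = (uintptr_t)(attr + 1); /* specs follow the attr */
            uint8_t i;

            for (i = 0; i < attr->num_of_specs; ++i) {
                    const struct ibv_flow_spec *spec = (const void *)cur;

                    printf(" %x(%hu)", spec->hdr.type, spec->hdr.size);
                    cur += spec->hdr.size; /* hdr.size covers the whole spec */
            }
            printf("\n");
    }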

drivers/net/mlx5/mlx5_flow.c
index b71def5..78e3563 100644
@@ -2090,6 +2090,58 @@ mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)
        }
 }
 
+/**
+ * Dump flow hash RX queue detail.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param flow
+ *   Pointer to the rte_flow.
+ * @param hrxq_idx
+ *   Hash RX queue index.
+ */
+static void
+mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused,
+              struct rte_flow *flow __rte_unused,
+              unsigned int hrxq_idx __rte_unused)
+{
+#ifndef NDEBUG
+       uintptr_t spec_ptr;
+       uint16_t j;
+       char buf[256];
+       uint8_t off;
+
+       spec_ptr = (uintptr_t)(flow->frxq[hrxq_idx].ibv_attr + 1);
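+       /* Specs sit back to back after the attr; hdr.size steps to the next. */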
+       for (j = 0, off = 0; j < flow->frxq[hrxq_idx].ibv_attr->num_of_specs;
+            j++) {
+               struct ibv_flow_spec *spec = (void *)spec_ptr;
+               off += sprintf(buf + off, " %x(%hu)", spec->hdr.type,
+                              spec->hdr.size);
+               spec_ptr += spec->hdr.size;
+       }
+       DRV_LOG(DEBUG,
+               "port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p,"
+               " hash:%" PRIx64 "/%u specs:%hhu(%hu), priority:%hu, type:%d,"
+               " flags:%x, comp_mask:%x specs:%s",
+               dev->data->port_id, (void *)flow, hrxq_idx,
+               (void *)flow->frxq[hrxq_idx].hrxq,
+               (void *)flow->frxq[hrxq_idx].hrxq->qp,
+               (void *)flow->frxq[hrxq_idx].hrxq->ind_table,
+               flow->frxq[hrxq_idx].hash_fields |
+               (flow->tunnel &&
+                flow->rss_conf.level > 1 ? (uint32_t)IBV_RX_HASH_INNER : 0),
+               flow->rss_conf.queue_num,
+               flow->frxq[hrxq_idx].ibv_attr->num_of_specs,
+               flow->frxq[hrxq_idx].ibv_attr->size,
+               flow->frxq[hrxq_idx].ibv_attr->priority,
+               flow->frxq[hrxq_idx].ibv_attr->type,
+               flow->frxq[hrxq_idx].ibv_attr->flags,
+               flow->frxq[hrxq_idx].ibv_attr->comp_mask,
+               buf);
+#endif
+}
+
 /**
  * Complete flow rule creation.
  *
@@ -2132,6 +2183,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
                flow->frxq[i].ibv_flow =
                        mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
                                               flow->frxq[i].ibv_attr);
+               mlx5_flow_dump(dev, flow, i);
                if (!flow->frxq[i].ibv_flow) {
                        rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -2139,11 +2191,6 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
                        goto error;
                }
                ++flows_n;
-               DRV_LOG(DEBUG, "port %u %p type %d QP %p ibv_flow %p",
-                       dev->data->port_id,
-                       (void *)flow, i,
-                       (void *)flow->frxq[i].hrxq->qp,
-                       (void *)flow->frxq[i].ibv_flow);
        }
        if (!flows_n) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -2687,24 +2734,25 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
                                              flow->rss_conf.level);
                        if (!flow->frxq[i].hrxq) {
                                DRV_LOG(DEBUG,
-                                       "port %u flow %p cannot be applied",
+                                       "port %u flow %p cannot create hash"
+                                       " rxq",
                                        dev->data->port_id, (void *)flow);
                                rte_errno = EINVAL;
                                return -rte_errno;
                        }
 flow_create:
+                       mlx5_flow_dump(dev, flow, i);
                        flow->frxq[i].ibv_flow =
                                mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
                                                       flow->frxq[i].ibv_attr);
                        if (!flow->frxq[i].ibv_flow) {
                                DRV_LOG(DEBUG,
-                                       "port %u flow %p cannot be applied",
-                                       dev->data->port_id, (void *)flow);
+                                       "port %u flow %p type %u cannot be"
+                                       " applied",
+                                       dev->data->port_id, (void *)flow, i);
                                rte_errno = EINVAL;
                                return -rte_errno;
                        }
-                       DRV_LOG(DEBUG, "port %u flow %p applied",
-                               dev->data->port_id, (void *)flow);
                }
                mlx5_flow_create_update_rxqs(dev, flow);
        }
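With the dump in place, every applied Verbs flow logs one DEBUG line per hash
Rx queue type. A hypothetical line with purely illustrative values, laid out
according to the format string above (the prefix depends on the application's
log configuration):

    net_mlx5: port 0 Verbs flow 0x7f2e64a19200 type 4: hrxq:0x7f2e64a3c000
    qp:0x7f2e64a3d800 ind:0x7f2e64a3b0c0, hash:0xc3/1 specs:3(100),
    priority:2, type:0, flags:0, comp_mask:0 specs: 20(40) 30(24) 41(16)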
drivers/net/mlx5/mlx5_rxq.c
index f81de25..126412d 100644
@@ -1259,9 +1259,9 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
        }
        rte_atomic32_inc(&ind_tbl->refcnt);
        LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
-       DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-               dev->data->port_id, (void *)ind_tbl,
-               rte_atomic32_read(&ind_tbl->refcnt));
+       DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
+             dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
+             rte_atomic32_read(&ind_tbl->refcnt));
        return ind_tbl;
 error:
        rte_free(ind_tbl);
@@ -1330,9 +1330,12 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
        DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
                ((struct priv *)dev->data->dev_private)->port,
                (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
-       if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+       if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
                claim_zero(mlx5_glue->destroy_rwq_ind_table
                           (ind_tbl->ind_table));
+               DEBUG("port %u delete indirection table %p: queues: %u",
+                     dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
+       }
        for (i = 0; i != ind_tbl->queues_n; ++i)
                claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
        if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1449,6 +1452,13 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                        .pd = priv->pd,
                 },
                 &qp_init_attr);
+       DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+             " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64
+             " create_flags:0x%x",
+             dev->data->port_id, (void *)qp, (void *)ind_tbl,
+             (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
+             hash_fields, tunnel, rss_level,
+             qp_init_attr.comp_mask, qp_init_attr.create_flags);
 #else
        qp = mlx5_glue->create_qp_ex
                (priv->ctx,
@@ -1470,6 +1480,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                        .rwq_ind_tbl = ind_tbl->ind_table,
                        .pd = priv->pd,
                 });
+       DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+             " tunnel:0x%x level:%hhu",
+             dev->data->port_id, (void *)qp, (void *)ind_tbl,
+             hash_fields, tunnel, rss_level);
 #endif
        if (!qp) {
                rte_errno = errno;
@@ -1579,6 +1593,10 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
                (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
        if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
                claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+               DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
+                     " 0x%x, level: %u",
+                     dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
+                     hrxq->tunnel, hrxq->rss_level);
                mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
                LIST_REMOVE(hrxq, next);
                rte_free(hrxq);
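The release paths above share one pattern: the deletion message is emitted
only on the last reference drop, right beside the destroy call, so the log
pinpoints when the hardware object actually goes away. A condensed sketch of
the pattern (obj, handle and destroy_fn are placeholders, not driver API):

    /* Sketch: log the deletion exactly once, when the last user lets go. */
    if (rte_atomic32_dec_and_test(&obj->refcnt)) {
            claim_zero(destroy_fn(obj->handle)); /* asserted in debug builds */
            DEBUG("deleted object %p", (void *)obj); /* no-op under NDEBUG */
    }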
drivers/net/mlx5/mlx5_utils.h
index e8f980f..886f60e 100644
@@ -103,16 +103,22 @@ extern int mlx5_logtype;
 /* claim_zero() does not perform any check when debugging is disabled. */
 #ifndef NDEBUG
 
+#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
 #define claim_zero(...) assert((__VA_ARGS__) == 0)
 #define claim_nonzero(...) assert((__VA_ARGS__) != 0)
 
 #else /* NDEBUG */
 
+#define DEBUG(...) (void)0
 #define claim_zero(...) (__VA_ARGS__)
 #define claim_nonzero(...) (__VA_ARGS__)
 
 #endif /* NDEBUG */
 
+#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
+#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
+#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
+
 /* Convenience macros for accessing mbuf fields. */
 #define NEXT(m) ((m)->next)
 #define DATA_LEN(m) ((m)->data_len)
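Note that under NDEBUG the DEBUG() macro expands to (void)0, so its arguments
are never evaluated; expressions with side effects must not be passed to it.
A hypothetical call site (dev and qp stand for whatever is in scope):

    DEBUG("port %u new QP:%p", dev->data->port_id, (void *)qp);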