net/mlx5: clean up developer logs
author Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Tue, 5 Jun 2018 08:45:22 +0000 (10:45 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Mon, 2 Jul 2018 23:35:57 +0000 (01:35 +0200)
Split maintainer logs from user logs.

A lot of debug logs expose to users internal information on how the PMD
works. Such logs are not meant for them and thus should only be
available when the PMD is compiled in debug mode.

This commit removes some useless debug logs, moves the maintainer ones
under DEBUG and makes the dump functions available in debug mode only.
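
For reference, this relies on the maintainer-only DEBUG() macro being
compiled out of non-debug builds, along the lines of the sketch below
(the actual definition lives in mlx5_utils.h and is not part of this
patch; the exact form is assumed here):

    /*
     * Sketch only: DEBUG() expands to a DRV_LOG(DEBUG, ...) call when the
     * PMD is built in debug mode (NDEBUG undefined) and to nothing
     * otherwise, while DRV_LOG() stays present in all builds and is
     * filtered at runtime through the driver's rte_log level.
     */
    #ifndef NDEBUG
    #define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
    #else
    #define DEBUG(...) (void)0
    #endif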

Cc: stable@dpdk.org
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
drivers/net/mlx5/mlx5_mr.c
drivers/net/mlx5/mlx5_mr.h
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_trigger.c
drivers/net/mlx5/mlx5_txq.c

drivers/net/mlx5/mlx5_mr.c
index 08105a4..1d1bcb5 100644
@@ -198,9 +198,8 @@ mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
                                      0, socket);
        if (bt->table == NULL) {
                rte_errno = ENOMEM;
-               DRV_LOG(ERR,
-                       "failed to allocate memory for btree cache on socket %d",
-                       socket);
+               DEBUG("failed to allocate memory for btree cache on socket %d",
+                     socket);
                return -rte_errno;
        }
        bt->size = n;
@@ -208,8 +207,8 @@ mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
        (*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
                .lkey = UINT32_MAX,
        };
-       DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
-               (void *)bt, (void *)bt->table);
+       DEBUG("initialized B-tree %p with table %p",
+             (void *)bt, (void *)bt->table);
        return 0;
 }
 
@@ -224,8 +223,8 @@ mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
 {
        if (bt == NULL)
                return;
-       DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
-               (void *)bt, (void *)bt->table);
+       DEBUG("freeing B-tree %p with table %p",
+             (void *)bt, (void *)bt->table);
        rte_free(bt->table);
        memset(bt, 0, sizeof(*bt));
 }
@@ -236,9 +235,10 @@ mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
  * @param bt
  *   Pointer to B-tree structure.
  */
-static void
-mlx5_mr_btree_dump(struct mlx5_mr_btree *bt)
+void
+mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
 {
+#ifndef NDEBUG
        int idx;
        struct mlx5_mr_cache *lkp_tbl;
 
@@ -248,11 +248,11 @@ mlx5_mr_btree_dump(struct mlx5_mr_btree *bt)
        for (idx = 0; idx < bt->len; ++idx) {
                struct mlx5_mr_cache *entry = &lkp_tbl[idx];
 
-               DRV_LOG(DEBUG,
-                       "B-tree(%p)[%u],"
-                       " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
-                       (void *)bt, idx, entry->start, entry->end, entry->lkey);
+               DEBUG("B-tree(%p)[%u],"
+                     " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+                     (void *)bt, idx, entry->start, entry->end, entry->lkey);
        }
+#endif
 }
 
 /**
@@ -576,11 +576,10 @@ alloc_resources:
        assert(msl->page_sz == ms->hugepage_sz);
        /* Number of memsegs in the range. */
        ms_n = len / msl->page_sz;
-       DRV_LOG(DEBUG,
-               "port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
-               " page_sz=0x%" PRIx64 ", ms_n=%u",
-               dev->data->port_id, (void *)addr,
-               data.start, data.end, msl->page_sz, ms_n);
+       DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+             " page_sz=0x%" PRIx64 ", ms_n=%u",
+             dev->data->port_id, (void *)addr,
+             data.start, data.end, msl->page_sz, ms_n);
        /* Size of memory for bitmap. */
        bmp_size = rte_bitmap_get_memory_footprint(ms_n);
        mr = rte_zmalloc_socket(NULL,
@@ -589,10 +588,9 @@ alloc_resources:
                                bmp_size,
                                RTE_CACHE_LINE_SIZE, msl->socket_id);
        if (mr == NULL) {
-               DRV_LOG(WARNING,
-                       "port %u unable to allocate memory for a new MR of"
-                       " address (%p).",
-                       dev->data->port_id, (void *)addr);
+               DEBUG("port %u unable to allocate memory for a new MR of"
+                     " address (%p).",
+                     dev->data->port_id, (void *)addr);
                rte_errno = ENOMEM;
                goto err_nolock;
        }
@@ -606,10 +604,9 @@ alloc_resources:
        bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
        mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
        if (mr->ms_bmp == NULL) {
-               DRV_LOG(WARNING,
-                       "port %u unable to initialize bitamp for a new MR of"
-                       " address (%p).",
-                       dev->data->port_id, (void *)addr);
+               DEBUG("port %u unable to initialize bitamp for a new MR of"
+                     " address (%p).",
+                     dev->data->port_id, (void *)addr);
                rte_errno = EINVAL;
                goto err_nolock;
        }
@@ -625,11 +622,10 @@ alloc_resources:
        data_re = data;
        if (len > msl->page_sz &&
            !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
-               DRV_LOG(WARNING,
-                       "port %u unable to find virtually contiguous"
-                       " chunk for address (%p)."
-                       " rte_memseg_contig_walk() failed.",
-                       dev->data->port_id, (void *)addr);
+               DEBUG("port %u unable to find virtually contiguous"
+                     " chunk for address (%p)."
+                     " rte_memseg_contig_walk() failed.",
+                     dev->data->port_id, (void *)addr);
                rte_errno = ENXIO;
                goto err_memlock;
        }
@@ -657,9 +653,8 @@ alloc_resources:
                 * here again.
                 */
                mr_btree_insert(&priv->mr.cache, entry);
-               DRV_LOG(DEBUG,
-                       "port %u found MR for %p on final lookup, abort",
-                       dev->data->port_id, (void *)addr);
+               DEBUG("port %u found MR for %p on final lookup, abort",
+                     dev->data->port_id, (void *)addr);
                rte_rwlock_write_unlock(&priv->mr.rwlock);
                rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
                /*
@@ -707,22 +702,20 @@ alloc_resources:
        mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
                                       IBV_ACCESS_LOCAL_WRITE);
        if (mr->ibv_mr == NULL) {
-               DRV_LOG(WARNING,
-                       "port %u fail to create a verbs MR for address (%p)",
-                       dev->data->port_id, (void *)addr);
+               DEBUG("port %u fail to create a verbs MR for address (%p)",
+                     dev->data->port_id, (void *)addr);
                rte_errno = EINVAL;
                goto err_mrlock;
        }
        assert((uintptr_t)mr->ibv_mr->addr == data.start);
        assert(mr->ibv_mr->length == len);
        LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
-       DRV_LOG(DEBUG,
-               "port %u MR CREATED (%p) for %p:\n"
-               "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
-               " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
-               dev->data->port_id, (void *)mr, (void *)addr,
-               data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
-               mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+       DEBUG("port %u MR CREATED (%p) for %p:\n"
+             "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+             " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+             dev->data->port_id, (void *)mr, (void *)addr,
+             data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+             mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
        /* Insert to the global cache table. */
        mr_insert_dev_cache(dev, mr);
        /* Fill in output data. */
@@ -797,8 +790,8 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
        int i;
        int rebuild = 0;
 
-       DRV_LOG(DEBUG, "port %u free callback: addr=%p, len=%zu",
-               dev->data->port_id, addr, len);
+       DEBUG("port %u free callback: addr=%p, len=%zu",
+             dev->data->port_id, addr, len);
        msl = rte_mem_virt2memseg_list(addr);
        /* addr and len must be page-aligned. */
        assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
@@ -825,14 +818,14 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
                pos = ms_idx - mr->ms_base_idx;
                assert(rte_bitmap_get(mr->ms_bmp, pos));
                assert(pos < mr->ms_bmp_n);
-               DRV_LOG(DEBUG, "port %u MR(%p): clear bitmap[%u] for addr %p",
-                       dev->data->port_id, (void *)mr, pos, (void *)start);
+               DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
+                     dev->data->port_id, (void *)mr, pos, (void *)start);
                rte_bitmap_clear(mr->ms_bmp, pos);
                if (--mr->ms_n == 0) {
                        LIST_REMOVE(mr, mr);
                        LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
-                       DRV_LOG(DEBUG, "port %u remove MR(%p) from list",
-                               dev->data->port_id, (void *)mr);
+                       DEBUG("port %u remove MR(%p) from list",
+                             dev->data->port_id, (void *)mr);
                }
                /*
                 * MR is fragmented or will be freed. the global cache must be
@@ -852,13 +845,11 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
                 * before the core sees the newly allocated memory.
                 */
                ++priv->mr.dev_gen;
-               DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
-                       priv->mr.dev_gen);
+               DEBUG("broadcasting local cache flush, gen=%d",
+                     priv->mr.dev_gen);
                rte_smp_wmb();
        }
        rte_rwlock_write_unlock(&priv->mr.rwlock);
-       if (rebuild && rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
-               mlx5_mr_dump_dev(dev);
 }
 
 /**
@@ -1123,8 +1114,9 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
  *   Pointer to Ethernet device.
  */
 void
-mlx5_mr_dump_dev(struct rte_eth_dev *dev)
+mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
 {
+#ifndef NDEBUG
        struct priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr;
        int mr_n = 0;
@@ -1135,11 +1127,10 @@ mlx5_mr_dump_dev(struct rte_eth_dev *dev)
        LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
                unsigned int n;
 
-               DRV_LOG(DEBUG,
-                       "port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
-                       dev->data->port_id, mr_n++,
-                       rte_cpu_to_be_32(mr->ibv_mr->lkey),
-                       mr->ms_n, mr->ms_bmp_n);
+               DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+                     dev->data->port_id, mr_n++,
+                     rte_cpu_to_be_32(mr->ibv_mr->lkey),
+                     mr->ms_n, mr->ms_bmp_n);
                if (mr->ms_n == 0)
                        continue;
                for (n = 0; n < mr->ms_bmp_n; ) {
@@ -1148,14 +1139,14 @@ mlx5_mr_dump_dev(struct rte_eth_dev *dev)
                        n = mr_find_next_chunk(mr, &ret, n);
                        if (!ret.end)
                                break;
-                       DRV_LOG(DEBUG,
-                               "  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
-                               chunk_n++, ret.start, ret.end);
+                       DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+                             chunk_n++, ret.start, ret.end);
                }
        }
-       DRV_LOG(DEBUG, "port %u dumping global cache", dev->data->port_id);
+       DEBUG("port %u dumping global cache", dev->data->port_id);
        mlx5_mr_btree_dump(&priv->mr.cache);
        rte_rwlock_read_unlock(&priv->mr.rwlock);
+#endif
 }
 
 /**
drivers/net/mlx5/mlx5_mr.h
index e0b2821..a57003f 100644
@@ -74,9 +74,12 @@ void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
                          size_t len, void *arg);
 int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
                      struct rte_mempool *mp);
-void mlx5_mr_dump_dev(struct rte_eth_dev *dev);
 void mlx5_mr_release(struct rte_eth_dev *dev);
 
+/* Debug purpose functions. */
+void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt);
+void mlx5_mr_dump_dev(struct rte_eth_dev *dev);
+
 /**
  * Look up LKey from given lookup table by linear search. Firstly look up the
  * last-hit entry. If miss, the entire array is searched. If found, update the
drivers/net/mlx5/mlx5_rxq.c
index de3f869..17db7c1 100644
@@ -993,8 +993,6 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
        DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
                idx, (void *)&tmpl);
        rte_atomic32_inc(&tmpl->refcnt);
-       DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-               dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
        LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
        return tmpl;
@@ -1036,9 +1034,6 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
        rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        if (rxq_ctrl->ibv) {
                rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
-               DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-                       dev->data->port_id, rxq_ctrl->idx,
-                       rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
        }
        return rxq_ctrl->ibv;
 }
@@ -1058,9 +1053,6 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
        assert(rxq_ibv);
        assert(rxq_ibv->wq);
        assert(rxq_ibv->cq);
-       DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-               PORT_ID(rxq_ibv->rxq_ctrl->priv),
-               rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
        if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
                rxq_free_elts(rxq_ibv->rxq_ctrl);
                claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
@@ -1449,8 +1441,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
        tmpl->idx = idx;
        rte_atomic32_inc(&tmpl->refcnt);
-       DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
-               idx, rte_atomic32_read(&tmpl->refcnt));
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
 error:
@@ -1481,9 +1471,6 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
                                        rxq);
                mlx5_rxq_ibv_get(dev, idx);
                rte_atomic32_inc(&rxq_ctrl->refcnt);
-               DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
-                       dev->data->port_id, rxq_ctrl->idx,
-                       rte_atomic32_read(&rxq_ctrl->refcnt));
        }
        return rxq_ctrl;
 }
@@ -1511,8 +1498,6 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
        assert(rxq_ctrl->priv);
        if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
                rxq_ctrl->ibv = NULL;
-       DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
-               rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
        if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
                mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
                LIST_REMOVE(rxq_ctrl, next);
@@ -1630,14 +1615,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
        }
        rte_atomic32_inc(&ind_tbl->refcnt);
        LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
-       DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
-             dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
-             rte_atomic32_read(&ind_tbl->refcnt));
        return ind_tbl;
 error:
        rte_free(ind_tbl);
-       DRV_LOG(DEBUG, "port %u cannot create indirection table",
-               dev->data->port_id);
+       DEBUG("port %u cannot create indirection table", dev->data->port_id);
        return NULL;
 }
 
@@ -1672,9 +1653,6 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
                unsigned int i;
 
                rte_atomic32_inc(&ind_tbl->refcnt);
-               DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-                       dev->data->port_id, (void *)ind_tbl,
-                       rte_atomic32_read(&ind_tbl->refcnt));
                for (i = 0; i != ind_tbl->queues_n; ++i)
                        mlx5_rxq_get(dev, ind_tbl->queues[i]);
        }
@@ -1698,15 +1676,9 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
 {
        unsigned int i;
 
-       DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-               dev->data->port_id, (void *)ind_tbl,
-               rte_atomic32_read(&ind_tbl->refcnt));
-       if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
+       if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
                claim_zero(mlx5_glue->destroy_rwq_ind_table
                           (ind_tbl->ind_table));
-               DEBUG("port %u delete indirection table %p: queues: %u",
-                     dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
-       }
        for (i = 0; i != ind_tbl->queues_n; ++i)
                claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
        if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1823,13 +1795,6 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                        .pd = priv->pd,
                 },
                 &qp_init_attr);
-       DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
-             " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64
-             " create_flags:0x%x",
-             dev->data->port_id, (void *)qp, (void *)ind_tbl,
-             (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
-             hash_fields, tunnel, rss_level,
-             qp_init_attr.comp_mask, qp_init_attr.create_flags);
 #else
        qp = mlx5_glue->create_qp_ex
                (priv->ctx,
@@ -1851,10 +1816,6 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                        .rwq_ind_tbl = ind_tbl->ind_table,
                        .pd = priv->pd,
                 });
-       DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
-             " tunnel:0x%x level:%hhu",
-             dev->data->port_id, (void *)qp, (void *)ind_tbl,
-             hash_fields, tunnel, rss_level);
 #endif
        if (!qp) {
                rte_errno = errno;
@@ -1872,9 +1833,6 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
        memcpy(hrxq->rss_key, rss_key, rss_key_len);
        rte_atomic32_inc(&hrxq->refcnt);
        LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
-       DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
-               dev->data->port_id, (void *)hrxq,
-               rte_atomic32_read(&hrxq->refcnt));
        return hrxq;
 error:
        err = rte_errno; /* Save rte_errno before cleanup. */
@@ -1937,9 +1895,6 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
                        continue;
                }
                rte_atomic32_inc(&hrxq->refcnt);
-               DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
-                       dev->data->port_id, (void *)hrxq,
-                       rte_atomic32_read(&hrxq->refcnt));
                return hrxq;
        }
        return NULL;
@@ -1959,15 +1914,8 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
 int
 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 {
-       DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
-               dev->data->port_id, (void *)hrxq,
-               rte_atomic32_read(&hrxq->refcnt));
        if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
                claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
-               DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
-                     " 0x%x, level: %u",
-                     dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
-                     hrxq->tunnel, hrxq->rss_level);
                mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
                LIST_REMOVE(hrxq, next);
                rte_free(hrxq);
drivers/net/mlx5/mlx5_trigger.c
index 3e7c0a9..4d2078b 100644
@@ -167,8 +167,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
-       if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
-               mlx5_mr_dump_dev(dev);
        ret = mlx5_rx_intr_vec_enable(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
drivers/net/mlx5/mlx5_txq.c
index 691ea07..068f36d 100644
@@ -514,8 +514,6 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                rte_errno = EINVAL;
                goto error;
        }
-       DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-               dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
        LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
        txq_ibv->txq_ctrl = txq_ctrl;
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -553,12 +551,8 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
        if (!(*priv->txqs)[idx])
                return NULL;
        txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-       if (txq_ctrl->ibv) {
+       if (txq_ctrl->ibv)
                rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
-               DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-                       dev->data->port_id, txq_ctrl->idx,
-                     rte_atomic32_read(&txq_ctrl->ibv->refcnt));
-       }
        return txq_ctrl->ibv;
 }
 
@@ -575,9 +569,6 @@ int
 mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
 {
        assert(txq_ibv);
-       DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-               PORT_ID(txq_ibv->txq_ctrl->priv),
-               txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
        if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
                claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
                claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
@@ -778,8 +769,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
        tmpl->txq.stats.idx = idx;
        rte_atomic32_inc(&tmpl->refcnt);
-       DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
-               idx, rte_atomic32_read(&tmpl->refcnt));
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
        return tmpl;
 error:
@@ -809,9 +798,6 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
                                    txq);
                mlx5_txq_ibv_get(dev, idx);
                rte_atomic32_inc(&ctrl->refcnt);
-               DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
-                       dev->data->port_id,
-                       ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
        }
        return ctrl;
 }
@@ -837,8 +823,6 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
        if (!(*priv->txqs)[idx])
                return 0;
        txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-       DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
-               txq->idx, rte_atomic32_read(&txq->refcnt));
        if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
                txq->ibv = NULL;
        if (priv->uar_base)