0, socket);
if (bt->table == NULL) {
rte_errno = ENOMEM;
- DRV_LOG(ERR,
- "failed to allocate memory for btree cache on socket %d",
- socket);
+ DEBUG("failed to allocate memory for btree cache on socket %d",
+ socket);
return -rte_errno;
}
bt->size = n;
(*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
.lkey = UINT32_MAX,
};
- DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
- (void *)bt, (void *)bt->table);
+ DEBUG("initialized B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
return 0;
}
{
if (bt == NULL)
return;
- DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
- (void *)bt, (void *)bt->table);
+ DEBUG("freeing B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
rte_free(bt->table);
memset(bt, 0, sizeof(*bt));
}
* @param bt
* Pointer to B-tree structure.
*/
-static void
-mlx5_mr_btree_dump(struct mlx5_mr_btree *bt)
+void
+mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
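+	/* Dump only when debug assertions are enabled (NDEBUG not defined). */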
+#ifndef NDEBUG
int idx;
struct mlx5_mr_cache *lkp_tbl;
for (idx = 0; idx < bt->len; ++idx) {
struct mlx5_mr_cache *entry = &lkp_tbl[idx];
- DRV_LOG(DEBUG,
- "B-tree(%p)[%u],"
- " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
- (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ DEBUG("B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
}
+#endif
}
/**
assert(msl->page_sz == ms->hugepage_sz);
/* Number of memsegs in the range. */
ms_n = len / msl->page_sz;
- DRV_LOG(DEBUG,
- "port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
- " page_sz=0x%" PRIx64 ", ms_n=%u",
- dev->data->port_id, (void *)addr,
- data.start, data.end, msl->page_sz, ms_n);
+ DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " page_sz=0x%" PRIx64 ", ms_n=%u",
+ dev->data->port_id, (void *)addr,
+ data.start, data.end, msl->page_sz, ms_n);
/* Size of memory for bitmap. */
bmp_size = rte_bitmap_get_memory_footprint(ms_n);
mr = rte_zmalloc_socket(NULL,
bmp_size,
RTE_CACHE_LINE_SIZE, msl->socket_id);
if (mr == NULL) {
- DRV_LOG(WARNING,
- "port %u unable to allocate memory for a new MR of"
- " address (%p).",
- dev->data->port_id, (void *)addr);
+ DEBUG("port %u unable to allocate memory for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
rte_errno = ENOMEM;
goto err_nolock;
}
bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
if (mr->ms_bmp == NULL) {
- DRV_LOG(WARNING,
- "port %u unable to initialize bitamp for a new MR of"
- " address (%p).",
- dev->data->port_id, (void *)addr);
+ DEBUG("port %u unable to initialize bitamp for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
goto err_nolock;
}
data_re = data;
if (len > msl->page_sz &&
!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
- DRV_LOG(WARNING,
- "port %u unable to find virtually contiguous"
- " chunk for address (%p)."
- " rte_memseg_contig_walk() failed.",
- dev->data->port_id, (void *)addr);
+ DEBUG("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
rte_errno = ENXIO;
goto err_memlock;
}
* here again.
*/
mr_btree_insert(&priv->mr.cache, entry);
- DRV_LOG(DEBUG,
- "port %u found MR for %p on final lookup, abort",
- dev->data->port_id, (void *)addr);
+ DEBUG("port %u found MR for %p on final lookup, abort",
+ dev->data->port_id, (void *)addr);
rte_rwlock_write_unlock(&priv->mr.rwlock);
rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
/*
mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
IBV_ACCESS_LOCAL_WRITE);
if (mr->ibv_mr == NULL) {
- DRV_LOG(WARNING,
- "port %u fail to create a verbs MR for address (%p)",
- dev->data->port_id, (void *)addr);
+ DEBUG("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
goto err_mrlock;
}
assert((uintptr_t)mr->ibv_mr->addr == data.start);
assert(mr->ibv_mr->length == len);
LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
- DRV_LOG(DEBUG,
- "port %u MR CREATED (%p) for %p:\n"
- " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
- " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
- dev->data->port_id, (void *)mr, (void *)addr,
- data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
- mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ DEBUG("port %u MR CREATED (%p) for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
/* Insert to the global cache table. */
mr_insert_dev_cache(dev, mr);
/* Fill in output data. */
int i;
int rebuild = 0;
- DRV_LOG(DEBUG, "port %u free callback: addr=%p, len=%zu",
- dev->data->port_id, addr, len);
+ DEBUG("port %u free callback: addr=%p, len=%zu",
+ dev->data->port_id, addr, len);
msl = rte_mem_virt2memseg_list(addr);
/* addr and len must be page-aligned. */
assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
pos = ms_idx - mr->ms_base_idx;
assert(rte_bitmap_get(mr->ms_bmp, pos));
assert(pos < mr->ms_bmp_n);
- DRV_LOG(DEBUG, "port %u MR(%p): clear bitmap[%u] for addr %p",
- dev->data->port_id, (void *)mr, pos, (void *)start);
+ DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
+ dev->data->port_id, (void *)mr, pos, (void *)start);
rte_bitmap_clear(mr->ms_bmp, pos);
if (--mr->ms_n == 0) {
LIST_REMOVE(mr, mr);
LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
- DRV_LOG(DEBUG, "port %u remove MR(%p) from list",
- dev->data->port_id, (void *)mr);
+ DEBUG("port %u remove MR(%p) from list",
+ dev->data->port_id, (void *)mr);
}
/*
* MR is fragmented or will be freed. the global cache must be
* before the core sees the newly allocated memory.
*/
++priv->mr.dev_gen;
- DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
- priv->mr.dev_gen);
+ DEBUG("broadcasting local cache flush, gen=%d",
+ priv->mr.dev_gen);
rte_smp_wmb();
}
rte_rwlock_write_unlock(&priv->mr.rwlock);
- if (rebuild && rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
- mlx5_mr_dump_dev(dev);
}
/**
* Pointer to Ethernet device.
*/
void
-mlx5_mr_dump_dev(struct rte_eth_dev *dev)
+mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
{
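+	/* The dump body is compiled out when NDEBUG is defined. */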
+#ifndef NDEBUG
struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
int mr_n = 0;
LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
unsigned int n;
- DRV_LOG(DEBUG,
- "port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
- dev->data->port_id, mr_n++,
- rte_cpu_to_be_32(mr->ibv_mr->lkey),
- mr->ms_n, mr->ms_bmp_n);
+ DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ dev->data->port_id, mr_n++,
+ rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_n, mr->ms_bmp_n);
if (mr->ms_n == 0)
continue;
for (n = 0; n < mr->ms_bmp_n; ) {
n = mr_find_next_chunk(mr, &ret, n);
if (!ret.end)
break;
- DRV_LOG(DEBUG,
- " chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
- chunk_n++, ret.start, ret.end);
+ DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+ chunk_n++, ret.start, ret.end);
}
}
- DRV_LOG(DEBUG, "port %u dumping global cache", dev->data->port_id);
+ DEBUG("port %u dumping global cache", dev->data->port_id);
mlx5_mr_btree_dump(&priv->mr.cache);
rte_rwlock_read_unlock(&priv->mr.rwlock);
+#endif
}
/**
DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
idx, (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
- DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
- dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->ibv) {
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
- DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
- dev->data->port_id, rxq_ctrl->idx,
- rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
}
return rxq_ctrl->ibv;
}
assert(rxq_ibv);
assert(rxq_ibv->wq);
assert(rxq_ibv->cq);
- DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
- PORT_ID(rxq_ibv->rxq_ctrl->priv),
- rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
tmpl->idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
- idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
rxq);
mlx5_rxq_ibv_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
- DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
- dev->data->port_id, rxq_ctrl->idx,
- rte_atomic32_read(&rxq_ctrl->refcnt));
}
return rxq_ctrl;
}
assert(rxq_ctrl->priv);
if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
rxq_ctrl->ibv = NULL;
- DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
- rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
}
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
- dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
- rte_atomic32_read(&ind_tbl->refcnt));
return ind_tbl;
error:
rte_free(ind_tbl);
- DRV_LOG(DEBUG, "port %u cannot create indirection table",
- dev->data->port_id);
+ DEBUG("port %u cannot create indirection table", dev->data->port_id);
return NULL;
}
unsigned int i;
rte_atomic32_inc(&ind_tbl->refcnt);
- DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
- dev->data->port_id, (void *)ind_tbl,
- rte_atomic32_read(&ind_tbl->refcnt));
for (i = 0; i != ind_tbl->queues_n; ++i)
mlx5_rxq_get(dev, ind_tbl->queues[i]);
}
{
unsigned int i;
- DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
- dev->data->port_id, (void *)ind_tbl,
- rte_atomic32_read(&ind_tbl->refcnt));
- if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
+ if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
claim_zero(mlx5_glue->destroy_rwq_ind_table
(ind_tbl->ind_table));
- DEBUG("port %u delete indirection table %p: queues: %u",
- dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
- }
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
.pd = priv->pd,
},
&qp_init_attr);
- DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
- " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64
- " create_flags:0x%x",
- dev->data->port_id, (void *)qp, (void *)ind_tbl,
- (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
- hash_fields, tunnel, rss_level,
- qp_init_attr.comp_mask, qp_init_attr.create_flags);
#else
qp = mlx5_glue->create_qp_ex
(priv->ctx,
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
});
- DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
- " tunnel:0x%x level:%hhu",
- dev->data->port_id, (void *)qp, (void *)ind_tbl,
- hash_fields, tunnel, rss_level);
#endif
if (!qp) {
rte_errno = errno;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
- DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
- dev->data->port_id, (void *)hrxq,
- rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
- DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
- dev->data->port_id, (void *)hrxq,
- rte_atomic32_read(&hrxq->refcnt));
return hrxq;
}
return NULL;
int
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
- DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
- dev->data->port_id, (void *)hrxq,
- rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
- " 0x%x, level: %u",
- dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
- hrxq->tunnel, hrxq->rss_level);
mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
rte_errno = EINVAL;
goto error;
}
- DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
- dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
txq_ibv->txq_ctrl = txq_ctrl;
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
if (!(*priv->txqs)[idx])
return NULL;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (txq_ctrl->ibv) {
+ if (txq_ctrl->ibv)
rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
- DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
- dev->data->port_id, txq_ctrl->idx,
- rte_atomic32_read(&txq_ctrl->ibv->refcnt));
- }
return txq_ctrl->ibv;
}
mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
{
assert(txq_ibv);
- DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
- PORT_ID(txq_ibv->txq_ctrl->priv),
- txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
- idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
error:
txq);
mlx5_txq_ibv_get(dev, idx);
rte_atomic32_inc(&ctrl->refcnt);
- DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
- dev->data->port_id,
- ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
}
return ctrl;
}
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
- txq->idx, rte_atomic32_read(&txq->refcnt));
if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
txq->ibv = NULL;
if (priv->uar_base)