if (ts_params->mbuf_pool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_MBUFPOOL count %u\n",
- rte_mempool_count(ts_params->mbuf_pool));
+ rte_mempool_avail_count(ts_params->mbuf_pool));
}
if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->op_mpool));
+ rte_mempool_avail_count(ts_params->op_mpool));
}
}
if (ts_params->mbuf_pool != NULL)
RTE_LOG(DEBUG, USER1, "CRYPTO_MBUFPOOL count %u\n",
- rte_mempool_count(ts_params->mbuf_pool));
+ rte_mempool_avail_count(ts_params->mbuf_pool));
rte_cryptodev_stats_get(ts_params->valid_devs[0], &stats);
if (ts_params->mbuf_mp != NULL)
RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_MBUFPOOL count %u\n",
- rte_mempool_count(ts_params->mbuf_mp));
+ rte_mempool_avail_count(ts_params->mbuf_mp));
if (ts_params->op_mpool != NULL)
RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_OP POOL count %u\n",
- rte_mempool_count(ts_params->op_mpool));
+ rte_mempool_avail_count(ts_params->op_mpool));
}
static int
if (ts_params->mbuf_mp != NULL)
RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_MBUFPOOL count %u\n",
- rte_mempool_count(ts_params->mbuf_mp));
+ rte_mempool_avail_count(ts_params->mbuf_mp));
rte_cryptodev_stats_get(ts_params->dev_id, &stats);
* - increment its reference up to N+1,
* - enqueue it N times into the ring for slave cores to free.
*/
- for (i = 0, n = rte_mempool_count(refcnt_pool);
+ for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
i++) {
ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
/* check that all mbufs are back into mempool by now */
for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
- if ((i = rte_mempool_count(refcnt_pool)) == n) {
+ if ((i = rte_mempool_avail_count(refcnt_pool)) == n) {
refcnt_lcore[lcore] += tref;
printf("%s(lcore=%u, iter=%u) completed, "
"%u references processed\n",
printf("get object count\n");
/* We have to count the extra caches, one in this case. */
offset = use_external_cache ? 1 * cache->len : 0;
- if (rte_mempool_count(mp) + offset != MEMPOOL_SIZE - 1)
+ if (rte_mempool_avail_count(mp) + offset != MEMPOOL_SIZE - 1)
GOTO_ERR(ret, out);
printf("get private data\n");
return ret;
}
printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n",
- mp->name, rte_mempool_free_count(mp));
+ mp->name, rte_mempool_in_use_count(mp));
if (rte_mempool_full(mp) != 1) {
printf("test_mempool_basic_ex the mempool should be full\n");
goto fail_mp_basic_ex;
external_cache_size : (unsigned) mp->cache_size,
cores, n_get_bulk, n_put_bulk, n_keep);
- if (rte_mempool_count(mp) != MEMPOOL_SIZE) {
+ if (rte_mempool_avail_count(mp) != MEMPOOL_SIZE) {
printf("mempool is not full\n");
return -1;
}
PKT_RX_QINQ_STRIPPED, that are better described. The old flags and
their behavior will be kept in 16.07 and will be removed in 16.11.
+* The APIs rte_mempool_count and rte_mempool_free_count are being deprecated
+ because their names are misleading: free_count actually returns the number
+ of allocated entries, not the number of free entries as its name suggests.
+ They are being replaced by rte_mempool_avail_count and
+ rte_mempool_in_use_count respectively.
+
* The mempool functions for single/multi producer/consumer are deprecated and
will be removed in 16.11.
It is replaced by rte_mempool_generic_get/put functions.
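
As a minimal migration sketch (illustrative only, not part of this patch;
it assumes mp is an initialized struct rte_mempool pointer), callers need
only a mechanical rename, since the returned values are unchanged:

    /* before: the names suggest the opposite of what is returned */
    unsigned int avail = rte_mempool_count(mp);      /* entries still in the pool */
    unsigned int used = rte_mempool_free_count(mp);  /* entries handed out */

    /* after: the names match the returned values */
    avail = rte_mempool_avail_count(mp);
    used = rte_mempool_in_use_count(mp);
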
"Failed to allocate rx buffer "
"sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
- rte_mempool_count(rxq->mb_pool),
- rte_mempool_free_count(rxq->mb_pool));
+ rte_mempool_avail_count(rxq->mb_pool),
+ rte_mempool_in_use_count(rxq->mb_pool));
return -ENOMEM;
}
rxq->sw_rx_ring[idx].mbuf = new_mb;
PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
qidx, rxq, mp->name, nb_desc,
- rte_mempool_count(mp), rxq->phys);
+ rte_mempool_avail_count(mp), rxq->phys);
dev->data->rx_queues[qidx] = rxq;
dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
total_rxq_desc += rxq->qlen_mask + 1;
exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
exp_buffs *= nic->eth_dev->data->nb_rx_queues;
- if (rte_mempool_count(rxq->pool) < exp_buffs) {
+ if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
rxq->pool->name,
- rte_mempool_count(rxq->pool),
+ rte_mempool_avail_count(rxq->pool),
exp_buffs);
return -ENOENT;
}
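
For a concrete reading of this check (illustrative values, not from the
patch): with the usual RTE_MEMPOOL_CACHE_MAX_SIZE of 512, two RX queues, and
an rx_free_thresh of 32, the pool must report at least
(512 + 32) * 2 = 1088 available buffers; otherwise queue setup fails with
-ENOENT.
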
pool = rte_mempool_lookup(buf_name);
if (pool)
printf("Port %d mempool free object is %u(%u)\n", slave->port[i],
- rte_mempool_count(pool), (unsigned)NB_MBUF);
+ rte_mempool_avail_count(pool),
+ (unsigned int)NB_MBUF);
else
printf("Can't find mempool %s\n", buf_name);
stats.oerrors - tx_stats[i].oerrors);
memcpy(&tx_stats[i], &stats, sizeof(stats));
- //printf("MP = %d\n", rte_mempool_count(conf->app_pktmbuf_pool));
-
#if APP_COLLECT_STAT
printf("-------+------------+------------+\n");
printf(" | received | dropped |\n");
}
-/* Return the number of entries in the mempool */
-unsigned
-rte_mempool_count(const struct rte_mempool *mp)
+/* Return the number of available entries in the mempool */
+unsigned int
+rte_mempool_avail_count(const struct rte_mempool *mp)
{
unsigned count;
unsigned lcore_id;
return count;
}
+/* Return the number of entries allocated from the mempool */
+unsigned int
+rte_mempool_in_use_count(const struct rte_mempool *mp)
+{
+ return mp->size - rte_mempool_avail_count(mp);
+}
+
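+/* deprecated alias, kept for backward compatibility:
+ * forwards to rte_mempool_avail_count() */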
+unsigned int
+rte_mempool_count(const struct rte_mempool *mp)
+{
+ return rte_mempool_avail_count(mp);
+}
+
/* dump the cache status */
static unsigned
rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
* @return
* The number of entries in the mempool.
*/
+unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
+
+/**
+ * @deprecated
+ * Use rte_mempool_avail_count() instead.
+ * Return the number of entries in the mempool.
+ *
+ * When cache is enabled, this function has to browse the per-lcore
+ * caches, so it should not be used in a data path, but only for
+ * debug purposes.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of entries in the mempool.
+ */
+__rte_deprecated
unsigned rte_mempool_count(const struct rte_mempool *mp);
/**
+ * Return the number of entries which have been allocated from the mempool.
+ *
+ * When cache is enabled, this function has to browse the per-lcore
+ * caches, so it should not be used in a data path, but only for
+ * debug purposes.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of entries which have been allocated from the mempool.
+ */
+unsigned int
+rte_mempool_in_use_count(const struct rte_mempool *mp);
+
+/**
+ * @deprecated
+ * Use rte_mempool_in_use_count() instead.
* Return the number of free entries in the mempool ring.
* i.e. how many entries can be freed back to the mempool.
*
* @return
* The number of free entries in the mempool.
*/
+__rte_deprecated
static inline unsigned
rte_mempool_free_count(const struct rte_mempool *mp)
{
- return mp->size - rte_mempool_count(mp);
+ return rte_mempool_in_use_count(mp);
}
/**
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
- return !!(rte_mempool_count(mp) == mp->size);
+ return !!(rte_mempool_avail_count(mp) == mp->size);
}
/**
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
- return !!(rte_mempool_count(mp) == 0);
+ return !!(rte_mempool_avail_count(mp) == 0);
}
/**
DPDK_16.07 {
global:
+ rte_mempool_avail_count;
rte_mempool_cache_create;
rte_mempool_cache_flush;
rte_mempool_cache_free;
rte_mempool_free;
rte_mempool_generic_get;
rte_mempool_generic_put;
+ rte_mempool_in_use_count;
rte_mempool_mem_iter;
rte_mempool_obj_iter;
rte_mempool_ops_table;
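
To see the renamed accessors together, here is a small debug helper sketched
for illustration (the helper itself is not part of this patch; it assumes an
initialized mempool):

    #include <stdio.h>
    #include <assert.h>
    #include <rte_mempool.h>

    /* illustrative helper: dump pool occupancy with the new names;
     * both calls browse the per-lcore caches, so keep this out of
     * the data path */
    static void
    dump_pool_usage(const struct rte_mempool *mp)
    {
            unsigned int avail = rte_mempool_avail_count(mp);
            unsigned int used = rte_mempool_in_use_count(mp);

            /* the two counters always partition the pool */
            assert(avail + used == mp->size);

            printf("%s: %u available, %u in use (full=%d, empty=%d)\n",
                   mp->name, avail, used,
                   rte_mempool_full(mp), rte_mempool_empty(mp));
    }

rte_mempool_full() and rte_mempool_empty() are just the boundary cases
avail == mp->size and avail == 0, as the inline definitions above show.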