diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index c90cf31..9e0ee05 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -28,9 +28,9 @@
  * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
  * thread due to the internal per-lcore cache. Due to the lack of caching,
  * rte_mempool_get() or rte_mempool_put() performance will suffer when called
- * by non-EAL threads. Instead, non-EAL threads should call
- * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
- * created with rte_mempool_cache_create().
+ * by unregistered non-EAL threads. Instead, unregistered non-EAL threads
+ * should call rte_mempool_generic_get() or rte_mempool_generic_put() with a
+ * user cache created with rte_mempool_cache_create().
  */
 
 #include <stdio.h>
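The comment change above tightens the threading guidance: only registered EAL threads get the built-in per-lcore cache. As a minimal sketch (not part of this diff), this is how an unregistered non-EAL thread would follow the recommended pattern; the pool pointer and the burst size of 32 are hypothetical:

#include <rte_mempool.h>

/* Sketch: recommended pattern for an unregistered non-EAL thread. */
static int
worker_thread(struct rte_mempool *mp)
{
	void *objs[32];
	/* User-owned cache, since no per-lcore cache exists for this thread. */
	struct rte_mempool_cache *cache =
		rte_mempool_cache_create(32, SOCKET_ID_ANY);

	if (cache == NULL)
		return -1;

	/* Cached get/put via the generic API, as the comment recommends. */
	if (rte_mempool_generic_get(mp, objs, 32, cache) == 0)
		rte_mempool_generic_put(mp, objs, 32, cache);

	/* Return cached objects to the pool before destroying the cache. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
	return 0;
}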
@@ -51,6 +51,8 @@
 #include <rte_memcpy.h>
 #include <rte_common.h>
 
+#include "rte_mempool_trace_fp.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -736,6 +738,7 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
 {
 	struct rte_mempool_ops *ops;
 
+	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
 	ops = rte_mempool_get_ops(mp->ops_index);
 	return ops->dequeue(mp, obj_table, n);
 }
@@ -761,6 +764,7 @@ rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
 
 	ops = rte_mempool_get_ops(mp->ops_index);
 	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
+	rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
 	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
 }
 
@@ -783,6 +787,7 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
 {
 	struct rte_mempool_ops *ops;
 
+	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
 	ops = rte_mempool_get_ops(mp->ops_index);
 	return ops->enqueue(mp, obj_table, n);
 }
@@ -1107,9 +1112,12 @@ rte_mempool_free(struct rte_mempool *mp);
  * @param opaque
  *   An opaque argument passed to free_cb.
  * @return
- *   The number of objects added on success.
+ *   The number of objects added on success (strictly positive).
  *   On error, the chunk is not added in the memory list of the
- *   mempool and a negative errno is returned.
+ *   mempool and the following code is returned:
+ *     (0): not enough room in chunk for one object.
+ *     (-ENOSPC): mempool is already populated.
+ *     (-ENOMEM): allocation failure.
  */
 int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
@@ -1134,9 +1142,12 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
  * @param opaque
  *   An opaque argument passed to free_cb.
  * @return
- *   The number of objects added on success.
+ *   The number of objects added on success (strictly positive).
  *   On error, the chunk is not added in the memory list of the
- *   mempool and a negative errno is returned.
+ *   mempool and the following code is returned:
+ *     (0): not enough room in chunk for one object.
+ *     (-ENOSPC): mempool is already populated.
+ *     (-ENOMEM): allocation failure.
  */
 int
 rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
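Both populate functions now return 0, rather than an error, when the chunk cannot hold even one object. A short sketch (not part of this diff; the chunk parameters are hypothetical) of a caller distinguishing the documented return codes:

#include <errno.h>
#include <stdio.h>
#include <rte_mempool.h>

/* Sketch: vaddr/iova/len describe a hypothetical pre-allocated chunk. */
static void
populate_chunk(struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len)
{
	int ret = rte_mempool_populate_iova(mp, vaddr, iova, len, NULL, NULL);

	if (ret > 0)
		printf("added %d objects\n", ret); /* success is strictly positive */
	else if (ret == 0)
		printf("chunk too small for one object, none added\n");
	else if (ret == -ENOSPC)
		printf("mempool is already populated\n");
	else if (ret == -ENOMEM)
		printf("allocation failure\n");
}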
@@ -1222,7 +1233,7 @@ void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
 /**
  * Create a user-owned mempool cache.
  *
- * This can be used by non-EAL threads to enable caching when they
+ * This can be used by unregistered non-EAL threads to enable caching when they
  * interact with a mempool.
  *
  * @param size
@@ -1253,7 +1264,8 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache);
  * @param lcore_id
  *   The logical core id.
  * @return
- *   A pointer to the mempool cache or NULL if disabled or non-EAL thread.
+ *   A pointer to the mempool cache or NULL if disabled or unregistered non-EAL
+ *   thread.
  */
 static __rte_always_inline struct rte_mempool_cache *
 rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
@@ -1264,6 +1276,8 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
 	if (lcore_id >= RTE_MAX_LCORE)
 		return NULL;
 
+	rte_mempool_trace_default_cache(mp, lcore_id,
+		&mp->local_cache[lcore_id]);
 	return &mp->local_cache[lcore_id];
 }
@@ -1283,6 +1297,7 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
 	cache = rte_mempool_default_cache(mp, rte_lcore_id());
 	if (cache == NULL || cache->len == 0)
 		return;
+	rte_mempool_trace_cache_flush(cache, mp);
 	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
 	cache->len = 0;
 }
@@ -1362,6 +1377,7 @@ static __rte_always_inline void
 rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 			unsigned int n, struct rte_mempool_cache *cache)
 {
+	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
 	__mempool_check_cookies(mp, obj_table, n, 0);
 	__mempool_generic_put(mp, obj_table, n, cache);
 }
@@ -1386,6 +1402,7 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 {
 	struct rte_mempool_cache *cache;
 	cache = rte_mempool_default_cache(mp, rte_lcore_id());
+	rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
 	rte_mempool_generic_put(mp, obj_table, n, cache);
 }
@@ -1507,6 +1524,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 	ret = __mempool_generic_get(mp, obj_table, n, cache);
 	if (ret == 0)
 		__mempool_check_cookies(mp, obj_table, n, 1);
+	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
 	return ret;
 }
@@ -1537,6 +1555,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
 {
 	struct rte_mempool_cache *cache;
 	cache = rte_mempool_default_cache(mp, rte_lcore_id());
+	rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
 	return rte_mempool_generic_get(mp, obj_table, n, cache);
 }
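For contrast with the user-cache pattern shown earlier, a registered EAL thread can stay on the plain bulk API; the calls below are exactly the fast paths instrumented by the new trace points (sketch only, not part of this diff; the burst size is hypothetical):

#include <rte_mempool.h>

/* Sketch: fast-path burst from a registered EAL thread; the per-lcore
 * default cache is picked up automatically via rte_lcore_id(). */
static void
process_burst(struct rte_mempool *mp)
{
	void *objs[8];

	if (rte_mempool_get_bulk(mp, objs, 8) != 0)
		return; /* pool (and cache) empty */
	/* ... use the objects ... */
	rte_mempool_put_bulk(mp, objs, 8);
}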
@@ -1606,6 +1625,7 @@ rte_mempool_get_contig_blocks(struct rte_mempool *mp,
 		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
 	}
 
+	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
 	return ret;
 }
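Note: the rte_mempool_trace_*() calls added throughout are fast-path trace points declared in the new rte_mempool_trace_fp.h; when tracing is disabled they compile down to a cheap branch. They are typically enabled at application startup through the EAL --trace option with a regular expression matching the registered trace point names (for example --trace=.*mempool.*; the exact names are an assumption here, as they live in the trace registration source, not in this header).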