mempool: allow populating with unaligned virtual area
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 7c9cd9a..0fe8aa7 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -427,7 +427,7 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
  *
- * Dequeue a number of contiquous object blocks from the external pool.
+ * Dequeue a number of contiguous object blocks from the external pool.
  */
 typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
 		void **first_obj_table, unsigned int n);
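The typedef touched above is the backend hook behind the experimental contiguous-block dequeue path. For reference, a minimal caller-side sketch, assuming the rte_mempool_get_contig_blocks() wrapper declared elsewhere in this header; the exact error codes are assumptions based on the surrounding documentation:

/* Sketch only: request "n" contiguous object blocks through the
 * experimental helper that dispatches to the callback above. */
#include <rte_mempool.h>

static int
grab_blocks(struct rte_mempool *mp, void **first_obj, unsigned int n)
{
	int ret = rte_mempool_get_contig_blocks(mp, first_obj, n);

	if (ret < 0) {
		/* Backend cannot satisfy the request, or does not
		 * implement dequeue_contig_blocks at all. */
		return ret;
	}
	return 0;
}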
@@ -832,10 +832,9 @@ int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
  * Note that the rte_mempool_register_ops fails silently here when
  * more than RTE_MEMPOOL_MAX_OPS_IDX is registered.
  */
-#define MEMPOOL_REGISTER_OPS(ops)					\
-	void mp_hdlr_init_##ops(void);					\
-	void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
-	{								\
+#define MEMPOOL_REGISTER_OPS(ops)					\
+	RTE_INIT(mp_hdlr_init_##ops)					\
+	{								\
 		rte_mempool_register_ops(&ops);				\
 	}
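The hunk above only changes how the registration constructor is emitted (RTE_INIT() instead of an open-coded __attribute__((constructor))); how drivers use the macro is unchanged. A minimal sketch of that usage, with hypothetical names and a deliberately do-nothing backend:

/* Sketch of a mempool ops table registered with MEMPOOL_REGISTER_OPS().
 * All names are hypothetical; the callbacks only illustrate the pattern. */
#include <errno.h>
#include <rte_mempool.h>

static int dummy_alloc(struct rte_mempool *mp) { (void)mp; return 0; }
static void dummy_free(struct rte_mempool *mp) { (void)mp; }
static int dummy_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned int n)
{ (void)mp; (void)obj_table; (void)n; return -ENOBUFS; }
static int dummy_dequeue(struct rte_mempool *mp, void **obj_table,
		unsigned int n)
{ (void)mp; (void)obj_table; (void)n; return -ENOENT; }
static unsigned int dummy_get_count(const struct rte_mempool *mp)
{ (void)mp; return 0; }

static const struct rte_mempool_ops dummy_ops = {
	.name = "dummy",
	.alloc = dummy_alloc,
	.free = dummy_free,
	.enqueue = dummy_enqueue,
	.dequeue = dummy_dequeue,
	.get_count = dummy_get_count,
};

/* Expands (after this patch) to an RTE_INIT() constructor that calls
 * rte_mempool_register_ops(&dummy_ops) before main(). */
MEMPOOL_REGISTER_OPS(dummy_ops);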
@@ -1043,9 +1042,8 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
  *   A pointer to the mempool structure.
  * @param addr
  *   The virtual address of memory that should be used to store objects.
- *   Must be page-aligned.
  * @param len
- *   The length of memory in bytes. Must be page-aligned.
+ *   The length of memory in bytes.
  * @param pg_sz
  *   The size of memory pages in this virtual area.
  * @param free_cb
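The removed doxygen lines are the user-visible part of this change: rte_mempool_populate_virt() no longer requires addr or len to be page-aligned. A minimal sketch of a call that this relaxation makes possible, with hypothetical names and pg_sz taken from the OS page size:

/* Sketch only: populate a mempool from a heap buffer whose address and
 * length are not page-aligned. Error handling is kept to a minimum. */
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <rte_mempool.h>

static void
heap_chunk_free(struct rte_mempool_memhdr *memhdr, void *opaque)
{
	(void)memhdr;
	free(opaque); /* opaque carries the malloc()ed base address */
}

static int
populate_from_heap(struct rte_mempool *mp, size_t len)
{
	char *addr = malloc(len); /* malloc() gives no page alignment */
	int ret;

	if (addr == NULL)
		return -ENOMEM;

	/* Returns the number of objects added, or a negative errno. */
	ret = rte_mempool_populate_virt(mp, addr, len,
			(size_t)sysconf(_SC_PAGESIZE),
			heap_chunk_free, addr);
	if (ret < 0)
		free(addr); /* assume the mempool did not take ownership */
	return ret;
}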
@@ -1364,7 +1362,7 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 			&cache->objs[cache->len], req);
 		if (unlikely(ret < 0)) {
 			/*
-			 * In the offchance that we are buffer constrained,
+			 * In the off chance that we are buffer constrained,
 			 * where we are not able to allocate cache + n, go to
 			 * the ring directly. If that fails, we are truly out of
 			 * buffers.
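The corrected comment describes the fallback inside the cached get path: when the backend cannot refill the cache with cache + n objects, the request is served directly from the ring, and only if that also fails is the pool reported empty. A caller-side sketch (hypothetical names) of handling that final failure:

/* Sketch: bulk-allocate "n" objects and handle the "truly out of buffers"
 * case the comment above refers to. rte_mempool_get_bulk() returns 0 on
 * success and a negative value when even the ring cannot satisfy the
 * request; no objects are taken in that case. */
#include <rte_mempool.h>

static int
take_objects(struct rte_mempool *mp, void **objs, unsigned int n)
{
	if (rte_mempool_get_bulk(mp, objs, n) < 0) {
		/* Cache refill and direct ring dequeue both failed. */
		return -1;
	}
	/* ... use the n objects, then return them with
	 * rte_mempool_put_bulk(mp, objs, n) ... */
	return 0;
}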