static rte_atomic32_t synchro;
+/*
+ * Simple example of a custom mempool structure: a spinlock-protected LIFO
+ * stack that holds pointers to all the elements in the pool.
+ */
+struct custom_mempool {
+ rte_spinlock_t lock;
+ unsigned count;
+ unsigned size;
+ void *elts[];
+};
+
+/*
+ * Allocate the custom mempool structure, sized to hold one pointer per
+ * element, and attach it to the mempool as its pool data.
+ */
+static int
+custom_mempool_alloc(struct rte_mempool *mp)
+{
+ struct custom_mempool *cm;
+
+ cm = rte_zmalloc("custom_mempool",
+ sizeof(struct custom_mempool) + mp->size * sizeof(void *), 0);
+ if (cm == NULL)
+ return -ENOMEM;
+
+ rte_spinlock_init(&cm->lock);
+ cm->count = 0;
+ cm->size = mp->size;
+ mp->pool_data = cm;
+ return 0;
+}
+
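+/* Free the custom mempool structure allocated in custom_mempool_alloc(). */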
+static void
+custom_mempool_free(struct rte_mempool *mp)
+{
+ rte_free(mp->pool_data);
+}
+
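+/*
+ * Put a bulk of object pointers back into the pool. Fails with -ENOBUFS
+ * if the pool does not have room for all n objects.
+ */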
+static int
+custom_mempool_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
+ int ret = 0;
+
+ rte_spinlock_lock(&cm->lock);
+ if (cm->count + n > cm->size) {
+ ret = -ENOBUFS;
+ } else {
+ memcpy(&cm->elts[cm->count], obj_table, sizeof(void *) * n);
+ cm->count += n;
+ }
+ rte_spinlock_unlock(&cm->lock);
+ return ret;
+}
+
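+/*
+ * Take a bulk of object pointers from the pool in LIFO order. Fails with
+ * -ENOENT if fewer than n objects are available.
+ */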
+static int
+custom_mempool_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
+ int ret = 0;
+
+ rte_spinlock_lock(&cm->lock);
+ if (n > cm->count) {
+ ret = -ENOENT;
+ } else {
+ cm->count -= n;
+ memcpy(obj_table, &cm->elts[cm->count], sizeof(void *) * n);
+ }
+ rte_spinlock_unlock(&cm->lock);
+ return ret;
+}
+
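+/* Return the number of objects currently held in the pool. */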
+static unsigned
+custom_mempool_get_count(const struct rte_mempool *mp)
+{
+ struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
+
+ return cm->count;
+}
+
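+/*
+ * Ops table registered under "custom_handler", so a mempool can select it
+ * with rte_mempool_set_ops_byname().
+ */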
+static struct rte_mempool_ops mempool_ops_custom = {
+ .name = "custom_handler",
+ .alloc = custom_mempool_alloc,
+ .free = custom_mempool_free,
+ .enqueue = custom_mempool_enqueue,
+ .dequeue = custom_mempool_dequeue,
+ .get_count = custom_mempool_get_count,
+};
+
+MEMPOOL_REGISTER_OPS(mempool_ops_custom);
+
/*
* save the object number in the first 4 bytes of object data. All
* other bytes are set to 0.
 * test function for mempool test based on a single consumer and a single
 * producer; can run on one lcore only
*/
-static int test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
+static int
+test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
{
return test_mempool_single_consumer();
}
-static void my_mp_init(struct rte_mempool * mp, __attribute__((unused)) void * arg)
+static void
+my_mp_init(struct rte_mempool *mp, __attribute__((unused)) void *arg)
{
printf("mempool name is %s\n", mp->name);
 /* nothing to be implemented here */
{
struct rte_mempool *mp_cache = NULL;
struct rte_mempool *mp_nocache = NULL;
+ struct rte_mempool *mp_ext = NULL;
rte_atomic32_init(&synchro);
goto err;
}
+ /* create a mempool with an external handler */
+ mp_ext = rte_mempool_create_empty("test_ext",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_ext == NULL) {
+ printf("cannot allocate mp_ext mempool\n");
+ goto err;
+ }
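+ /* the ops must be set before the mempool is populated */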
+ if (rte_mempool_set_ops_byname(mp_ext, "custom_handler", NULL) < 0) {
+ printf("cannot set custom handler\n");
+ goto err;
+ }
+ if (rte_mempool_populate_default(mp_ext) < 0) {
+ printf("cannot populate mp_ext mempool\n");
+ goto err;
+ }
+ rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
+
/* retrieve the mempool from its name */
if (rte_mempool_lookup("test_nocache") != mp_nocache) {
printf("Cannot lookup mempool from its name\n");
err:
rte_mempool_free(mp_nocache);
rte_mempool_free(mp_cache);
+ rte_mempool_free(mp_ext);
return -1;
}