mempool/bucket: implement block dequeue operation
author    Artem V. Andreev <artem.andreev@oktetlabs.ru>
Thu, 26 Apr 2018 10:59:22 +0000 (11:59 +0100)
committer Thomas Monjalon <thomas@monjalon.net>
Thu, 26 Apr 2018 21:34:07 +0000 (23:34 +0200)
Signed-off-by: Artem V. Andreev <artem.andreev@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
doc/guides/rel_notes/release_18_05.rst
drivers/mempool/bucket/rte_mempool_bucket.c

diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
index 3899c7d..f93141b 100644
@@ -47,6 +47,8 @@ New Features
   block of objects.
   The number of objects in a block depends on how many objects fit in a
   RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB memory chunk, which is a build-time option.
+  This number can be obtained with the rte_mempool_ops_get_info() API.
+  Contiguous blocks can be allocated with rte_mempool_get_contig_blocks().
 
 * **Added PMD-recommended Tx and Rx parameters**
 
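A minimal caller-side sketch combining the two APIs named above; rte_mempool_ops_get_info(), struct rte_mempool_info and rte_mempool_get_contig_blocks() are the documented interfaces, while the get_blocks() helper and its parameters are hypothetical:

    #include <rte_mempool.h>

    /* Hypothetical helper: query the objects-per-block count, then
     * dequeue n_blocks contiguous blocks from mp. */
    static int
    get_blocks(struct rte_mempool *mp, void **first_obj_table,
               unsigned int n_blocks, unsigned int *objs_per_block)
    {
            struct rte_mempool_info info;
            int rc;

            rc = rte_mempool_ops_get_info(mp, &info);
            if (rc != 0)
                    return rc; /* -ENOTSUP if the driver lacks .get_info */
            *objs_per_block = info.contig_block_size;

            /* All-or-nothing: -ENOBUFS if n_blocks full buckets
             * cannot be provided */
            return rte_mempool_get_contig_blocks(mp, first_obj_table,
                                                 n_blocks);
    }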
diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c
index ef822eb..24be24e 100644
@@ -294,6 +294,49 @@ bucket_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
        return rc;
 }
 
+static int
+bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table,
+                            unsigned int n)
+{
+       struct bucket_data *bd = mp->pool_data;
+       const uint32_t header_size = bd->header_size;
+       struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+       unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top);
+       struct bucket_header *hdr;
+       void **first_objp = first_obj_table;
+
+       bucket_adopt_orphans(bd);
+
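+       /* Serve as many blocks as possible from this lcore's bucket stack */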
+       n -= n_buckets_from_stack;
+       while (n_buckets_from_stack-- > 0) {
+               hdr = bucket_stack_pop_unsafe(cur_stack);
+               *first_objp++ = (uint8_t *)hdr + header_size;
+       }
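+       /* Dequeue the remaining blocks from the shared bucket ring */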
+       if (n > 0) {
+               if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring,
+                                                  first_objp, n, NULL) != n)) {
+                       /* Return the already dequeued buckets */
+                       while (first_objp-- != first_obj_table) {
+                               bucket_stack_push(cur_stack,
+                                                 (uint8_t *)*first_objp -
+                                                 header_size);
+                       }
+                       rte_errno = ENOBUFS;
+                       return -rte_errno;
+               }
+               while (n-- > 0) {
+                       hdr = (struct bucket_header *)*first_objp;
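+                       /* Record this lcore as the bucket's owner */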
+                       hdr->lcore_id = rte_lcore_id();
+                       *first_objp++ = (uint8_t *)hdr + header_size;
+               }
+       }
+
+       return 0;
+}
+
 static void
 count_underfilled_buckets(struct rte_mempool *mp,
                          void *opaque,
@@ -548,6 +591,15 @@ bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
        return n_objs;
 }
 
+static int
+bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
+{
+       struct bucket_data *bd = mp->pool_data;
+
+       info->contig_block_size = bd->obj_per_bucket;
+       return 0;
+}
+
 static const struct rte_mempool_ops ops_bucket = {
        .name = "bucket",
        .alloc = bucket_alloc,
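Since bucket_get_info() reports obj_per_bucket as the contiguous block size, a caller can address the individual objects inside a returned block. A sketch under the assumption that objects follow the default mempool layout, spaced by per-object header, element and trailer sizes; treat the stride computation as illustrative rather than part of the driver's contract:

    #include <stddef.h>
    #include <stdint.h>
    #include <rte_mempool.h>

    /* Visit each object of one contiguous block (layout assumed) */
    static void
    walk_block(const struct rte_mempool *mp, void *first_obj,
               unsigned int contig_block_size)
    {
            size_t stride = mp->header_size + mp->elt_size +
                            mp->trailer_size;
            unsigned int i;

            for (i = 0; i < contig_block_size; i++) {
                    void *obj = (uint8_t *)first_obj + i * stride;
                    (void)obj; /* ... process obj ... */
            }
    }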
@@ -557,6 +609,8 @@ static const struct rte_mempool_ops ops_bucket = {
        .get_count = bucket_get_count,
        .calc_mem_size = bucket_calc_mem_size,
        .populate = bucket_populate,
+       .get_info = bucket_get_info,
+       .dequeue_contig_blocks = bucket_dequeue_contig_blocks,
 };
 
 MEMPOOL_REGISTER_OPS(ops_bucket);
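With .get_info and .dequeue_contig_blocks wired into ops_bucket, an application opts in by selecting the "bucket" ops when creating a pool. A minimal sketch; the pool name, object count and object size below are illustrative:

    #include <rte_lcore.h>
    #include <rte_mempool.h>

    static struct rte_mempool *
    make_bucket_pool(void)
    {
            struct rte_mempool *mp;

            mp = rte_mempool_create_empty("bucket_pool", 4096, 2048,
                                          0, 0, rte_socket_id(), 0);
            if (mp == NULL)
                    return NULL;

            /* Switch to the bucket driver before populating */
            if (rte_mempool_set_ops_byname(mp, "bucket", NULL) != 0 ||
                rte_mempool_populate_default(mp) < 0) {
                    rte_mempool_free(mp);
                    return NULL;
            }
            return mp;
    }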