mempool: introduce helpers for populate and required size
diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c
index ef822eb..dfeaf4e 100644
--- a/drivers/mempool/bucket/rte_mempool_bucket.c
+++ b/drivers/mempool/bucket/rte_mempool_bucket.c
@@ -42,6 +42,7 @@ struct bucket_data {
        unsigned int header_size;
        unsigned int total_elt_size;
        unsigned int obj_per_bucket;
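+       /* Cap on the per-lcore stack of full buckets; on enqueue,
+        * anything above it is flushed to the shared ring
+        */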
+       unsigned int bucket_stack_thresh;
        uintptr_t bucket_page_mask;
        struct rte_ring *shared_bucket_ring;
        struct bucket_stack *buckets[RTE_MAX_LCORE];
@@ -139,6 +140,7 @@ bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
               unsigned int n)
 {
        struct bucket_data *bd = mp->pool_data;
+       struct bucket_stack *local_stack = bd->buckets[rte_lcore_id()];
        unsigned int i;
        int rc = 0;
 
@@ -146,6 +148,15 @@ bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
                rc = bucket_enqueue_single(bd, obj_table[i]);
                RTE_ASSERT(rc == 0);
        }
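+       /* Keep the per-lcore stack at or below the threshold by
+        * spilling excess full buckets to the shared ring
+        */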
+       if (local_stack->top > bd->bucket_stack_thresh) {
+               rte_ring_enqueue_bulk(bd->shared_bucket_ring,
+                                     &local_stack->objects
+                                     [bd->bucket_stack_thresh],
+                                     local_stack->top -
+                                     bd->bucket_stack_thresh,
+                                     NULL);
+               local_stack->top = bd->bucket_stack_thresh;
+       }
        return rc;
 }
 
@@ -294,6 +305,46 @@ bucket_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
        return rc;
 }
 
+static int
+bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table,
+                            unsigned int n)
+{
+       struct bucket_data *bd = mp->pool_data;
+       const uint32_t header_size = bd->header_size;
+       struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+       unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top);
+       struct bucket_header *hdr;
+       void **first_objp = first_obj_table;
+
+       bucket_adopt_orphans(bd);
+
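+       /* Take as many full buckets as possible from the per-lcore stack */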
+       n -= n_buckets_from_stack;
+       while (n_buckets_from_stack-- > 0) {
+               hdr = bucket_stack_pop_unsafe(cur_stack);
+               *first_objp++ = (uint8_t *)hdr + header_size;
+       }
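+       /* Pull the remaining buckets from the shared ring */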
+       if (n > 0) {
+               if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring,
+                                                  first_objp, n, NULL) != n)) {
+                       /* Return the already dequeued buckets */
+                       while (first_objp-- != first_obj_table) {
+                               bucket_stack_push(cur_stack,
+                                                 (uint8_t *)*first_objp -
+                                                 header_size);
+                       }
+                       rte_errno = ENOBUFS;
+                       return -rte_errno;
+               }
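+               /* Take ownership of the buckets and return pointers to
+                * their first objects
+                */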
+               while (n-- > 0) {
+                       hdr = (struct bucket_header *)*first_objp;
+                       hdr->lcore_id = rte_lcore_id();
+                       *first_objp++ = (uint8_t *)hdr + header_size;
+               }
+       }
+
+       return 0;
+}
+
 static void
 count_underfilled_buckets(struct rte_mempool *mp,
                          void *opaque,
@@ -369,6 +420,8 @@ bucket_alloc(struct rte_mempool *mp)
        bd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) /
                bd->total_elt_size;
        bd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1);
+       /* eventually this should be a tunable parameter */
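+       /* i.e. 4/3 of the total number of buckets in the pool */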
+       bd->bucket_stack_thresh = (mp->size / bd->obj_per_bucket) * 4 / 3;
 
        if (mp->flags & MEMPOOL_F_SP_PUT)
                rg_flags |= RING_F_SP_ENQ;
@@ -532,7 +585,7 @@ bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
 
                hdr->fill_cnt = 0;
                hdr->lcore_id = LCORE_ID_ANY;
-               rc = rte_mempool_op_populate_default(mp,
+               rc = rte_mempool_op_populate_helper(mp,
                                                     RTE_MIN(bd->obj_per_bucket,
                                                             max_objs - n_objs),
                                                     iter + bucket_header_sz,
@@ -548,6 +601,16 @@ bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
        return n_objs;
 }
 
+static int
+bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
+{
+       struct bucket_data *bd = mp->pool_data;
+
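+       /* One contiguous block is one bucket: obj_per_bucket objects */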
+       info->contig_block_size = bd->obj_per_bucket;
+       return 0;
+}
+
 static const struct rte_mempool_ops ops_bucket = {
        .name = "bucket",
        .alloc = bucket_alloc,
@@ -557,6 +620,8 @@ static const struct rte_mempool_ops ops_bucket = {
        .get_count = bucket_get_count,
        .calc_mem_size = bucket_calc_mem_size,
        .populate = bucket_populate,
+       .get_info = bucket_get_info,
+       .dequeue_contig_blocks = bucket_dequeue_contig_blocks,
 };
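
A minimal usage sketch (not part of the patch; the pool "mp" and the
block count are illustrative): with .get_info and .dequeue_contig_blocks
wired up, an application can pull whole physically contiguous buckets
through the generic mempool API, assuming mp was created with the
"bucket" ops:

	#include <rte_mempool.h>

	struct rte_mempool_info info;
	void *blocks[4];

	/* bucket_get_info() reports obj_per_bucket here */
	if (rte_mempool_ops_get_info(mp, &info) < 0)
		return;

	/* Each entry of blocks[] will point at the first of
	 * info.contig_block_size contiguous objects; on failure the
	 * driver restores its state and sets rte_errno to ENOBUFS.
	 */
	if (rte_mempool_get_contig_blocks(mp, blocks, 4) < 0)
		return;

Objects from the blocks are returned to the pool individually, e.g. via
rte_mempool_put_bulk(); there is no block-level put operation.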