unsigned int header_size;
unsigned int total_elt_size;
unsigned int obj_per_bucket;
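+	/* Per-lcore bucket stack spill threshold, in buckets */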
+ unsigned int bucket_stack_thresh;
uintptr_t bucket_page_mask;
struct rte_ring *shared_bucket_ring;
struct bucket_stack *buckets[RTE_MAX_LCORE];

static int
bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned int n)
{
struct bucket_data *bd = mp->pool_data;
+ struct bucket_stack *local_stack = bd->buckets[rte_lcore_id()];
unsigned int i;
int rc = 0;

	for (i = 0; i < n; i++) {
rc = bucket_enqueue_single(bd, obj_table[i]);
RTE_ASSERT(rc == 0);
}
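+	/* Spill buckets above the threshold to the shared ring */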
+ if (local_stack->top > bd->bucket_stack_thresh) {
+ rte_ring_enqueue_bulk(bd->shared_bucket_ring,
+ &local_stack->objects
+ [bd->bucket_stack_thresh],
+ local_stack->top -
+ bd->bucket_stack_thresh,
+ NULL);
+ local_stack->top = bd->bucket_stack_thresh;
+ }
return rc;
}
return rc;
}
+static int
+bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ const uint32_t header_size = bd->header_size;
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top);
+ struct bucket_header *hdr;
+ void **first_objp = first_obj_table;
+
+ bucket_adopt_orphans(bd);
+
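+	/* Take as many buckets as possible from this lcore's stack */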
+ n -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ hdr = bucket_stack_pop_unsafe(cur_stack);
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
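+	/* Fetch any remaining buckets from the shared ring */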
+ if (n > 0) {
+ if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring,
+ first_objp, n, NULL) != n)) {
+ /* Return the already dequeued buckets */
+ while (first_objp-- != first_obj_table) {
+ bucket_stack_push(cur_stack,
+ (uint8_t *)*first_objp -
+ header_size);
+ }
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
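+		/* Claim ring-dequeued buckets for this lcore */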
+ while (n-- > 0) {
+ hdr = (struct bucket_header *)*first_objp;
+ hdr->lcore_id = rte_lcore_id();
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ }
+
+ return 0;
+}
+
static void
count_underfilled_buckets(struct rte_mempool *mp,
void *opaque,
bd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) /
bd->total_elt_size;
bd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1);
+ /* eventually this should be a tunable parameter */
+ bd->bucket_stack_thresh = (mp->size / bd->obj_per_bucket) * 4 / 3;
if (mp->flags & MEMPOOL_F_SP_PUT)
rg_flags |= RING_F_SP_ENQ;
hdr->fill_cnt = 0;
hdr->lcore_id = LCORE_ID_ANY;
- rc = rte_mempool_op_populate_default(mp,
+	rc = rte_mempool_op_populate_helper(mp, 0,
RTE_MIN(bd->obj_per_bucket,
max_objs - n_objs),
iter + bucket_header_sz,
return n_objs;
}
+static int
+bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
+{
+ struct bucket_data *bd = mp->pool_data;
+
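+	/* A block is one whole bucket's worth of objects */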
+ info->contig_block_size = bd->obj_per_bucket;
+ return 0;
+}
+
static const struct rte_mempool_ops ops_bucket = {
.name = "bucket",
.alloc = bucket_alloc,
.get_count = bucket_get_count,
.calc_mem_size = bucket_calc_mem_size,
.populate = bucket_populate,
+ .get_info = bucket_get_info,
+ .dequeue_contig_blocks = bucket_dequeue_contig_blocks,
};
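
For context, a minimal usage sketch of the new block dequeue path as seen
from an application (hypothetical snippet, not part of this patch; `mp' is
assumed to be a mempool created with the "bucket" ops):

	struct rte_mempool_info info;
	void *first_obj;

	/* contig_block_size is reported by bucket_get_info() above */
	rte_mempool_ops_get_info(mp, &info);

	/* On success, first_obj points at the first of
	 * info.contig_block_size contiguous objects.
	 */
	if (rte_mempool_get_contig_blocks(mp, &first_obj, 1) == 0) {
		/* ... use the block; objects go back through the
		 * regular enqueue path above ...
		 */
	}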