diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c
index 24be24e..5ce1ef1 100644
--- a/drivers/mempool/bucket/rte_mempool_bucket.c
+++ b/drivers/mempool/bucket/rte_mempool_bucket.c
@@ -42,6 +42,7 @@ struct bucket_data {
        unsigned int header_size;
        unsigned int total_elt_size;
        unsigned int obj_per_bucket;
+       unsigned int bucket_stack_thresh;
        uintptr_t bucket_page_mask;
        struct rte_ring *shared_bucket_ring;
        struct bucket_stack *buckets[RTE_MAX_LCORE];
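The new bucket_stack_thresh field caps how many full buckets a single lcore
may keep on its private stack before the surplus is pushed back to the
shared ring (see the bucket_enqueue() hunk below). For reference, the
per-lcore stack declared earlier in this file looks roughly like this
(paraphrased, not part of the patch):

    /* top indexes the next free slot; objects[] holds bucket pointers. */
    struct bucket_stack {
            unsigned int top;
            unsigned int limit;
            void *objects[];
    };
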
@@ -139,6 +140,7 @@ bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
               unsigned int n)
 {
        struct bucket_data *bd = mp->pool_data;
+       struct bucket_stack *local_stack = bd->buckets[rte_lcore_id()];
        unsigned int i;
        int rc = 0;
 
@@ -146,6 +148,15 @@ bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
                rc = bucket_enqueue_single(bd, obj_table[i]);
                RTE_ASSERT(rc == 0);
        }
+       if (local_stack->top > bd->bucket_stack_thresh) {
+               rte_ring_enqueue_bulk(bd->shared_bucket_ring,
+                                     &local_stack->objects
+                                     [bd->bucket_stack_thresh],
+                                     local_stack->top -
+                                     bd->bucket_stack_thresh,
+                                     NULL);
+               local_stack->top = bd->bucket_stack_thresh;
+       }
        return rc;
 }
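
The added block implements a simple spill-over-threshold policy: everything
above bucket_stack_thresh is flushed to the shared ring, so one lcore cannot
hoard all of the pool's full buckets. A minimal, self-contained sketch of
the same pattern (toy names, not the driver's; the stand-in queue replaces
rte_ring_enqueue_bulk()):

    #define STACK_THRESH 8u

    struct toy_stack {
            unsigned int top;          /* next free slot */
            void *objects[64];
    };

    /* Stand-in for a multi-producer shared queue. */
    static unsigned int
    shared_enqueue_bulk(void **objs, unsigned int n)
    {
            (void)objs;
            return n;                  /* pretend all n were enqueued */
    }

    static void
    spill_excess(struct toy_stack *s)
    {
            if (s->top > STACK_THRESH) {
                    /* Move everything above the threshold to the shared
                     * queue, then truncate the local stack. */
                    shared_enqueue_bulk(&s->objects[STACK_THRESH],
                                        s->top - STACK_THRESH);
                    s->top = STACK_THRESH;
            }
    }

Note that the driver ignores the return value of rte_ring_enqueue_bulk()
here, presumably because shared_bucket_ring is sized to hold every bucket in
the pool, so the bulk enqueue cannot fail.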
 
@@ -390,6 +401,11 @@ bucket_alloc(struct rte_mempool *mp)
        struct bucket_data *bd;
        unsigned int i;
        unsigned int bucket_header_size;
+       size_t pg_sz;
+
+       rc = rte_mempool_get_page_size(mp, &pg_sz);
+       if (rc < 0)
+               return rc;
 
        bd = rte_zmalloc_socket("bucket_pool", sizeof(*bd),
                                RTE_CACHE_LINE_SIZE, mp->socket_id);
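
rte_mempool_get_page_size() comes from the same 19.11-era mempool rework
that added the populate helper used further down. A hedged sketch of its
contract, from memory of that API:

    /* Returns 0 on success, a negative errno on failure.  On success
     * *pg_sz holds the page size that constrains this mempool's memory
     * (0 when the mempool has no IOVA-contiguity constraint, if memory
     * serves). */
    int rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);

The rc variable used here is declared at the top of bucket_alloc(), outside
this hunk.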
@@ -405,10 +421,13 @@ bucket_alloc(struct rte_mempool *mp)
        RTE_BUILD_BUG_ON(sizeof(struct bucket_header) > RTE_CACHE_LINE_SIZE);
        bd->header_size = mp->header_size + bucket_header_size;
        bd->total_elt_size = mp->header_size + mp->elt_size + mp->trailer_size;
-       bd->bucket_mem_size = RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024;
+       bd->bucket_mem_size = RTE_MIN(pg_sz,
+                       (size_t)(RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024));
        bd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) /
                bd->total_elt_size;
        bd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1);
+       /* eventually this should be a tunable parameter */
+       bd->bucket_stack_thresh = (mp->size / bd->obj_per_bucket) * 4 / 3;
 
        if (mp->flags & MEMPOOL_F_SP_PUT)
                rg_flags |= RING_F_SP_ENQ;
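
Clamping bucket_mem_size to the page size guarantees that a bucket never
spans a page boundary, which is what the page-size lookup above is for.
Some illustrative arithmetic, assuming the classic build-time default
RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB = 64, 4 KiB pages, a 64-byte bucket
header, total_elt_size = 128, and mp->size = 8192 objects (all numbers
chosen for illustration):

    /* bucket_mem_size     = RTE_MIN(4096, 64 * 1024)  = 4096 bytes
     * obj_per_bucket      = (4096 - 64) / 128         = 31 objects
     * bucket_stack_thresh = (8192 / 31) * 4 / 3
     *                     = 264 * 4 / 3               = 352
     */

The threshold thus scales with the total number of buckets in the pool; as
the in-line comment says, it is a candidate for a tunable parameter.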
@@ -572,7 +591,7 @@ bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
 
                hdr->fill_cnt = 0;
                hdr->lcore_id = LCORE_ID_ANY;
-               rc = rte_mempool_op_populate_default(mp,
+               rc = rte_mempool_op_populate_helper(mp, 0,
                                                     RTE_MIN(bd->obj_per_bucket,
                                                             max_objs - n_objs),
                                                     iter + bucket_header_sz,
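
rte_mempool_op_populate_default() was folded into a more general helper in
the same rework; passing 0 for the new flags argument keeps the old
behaviour. A hedged sketch of the 19.11-era prototype (the one flag I
recall is RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ, which requests object
alignment):

    int rte_mempool_op_populate_helper(struct rte_mempool *mp,
                    unsigned int flags, unsigned int max_objs, void *vaddr,
                    rte_iova_t iova, size_t len,
                    rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);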