From: Lazaros Koromilas
Date: Tue, 28 Jun 2016 23:47:36 +0000 (+0100)
Subject: mempool: deprecate specific get and put functions
X-Git-Tag: spdx-start~6337
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=656f2d3ede96902202a1a5ffa22870289cf6a9db;p=dpdk.git

mempool: deprecate specific get and put functions

This commit introduces the API calls:

    rte_mempool_generic_put(mp, obj_table, n, is_mp)
    rte_mempool_generic_get(mp, obj_table, n, is_mc)

and deprecates the API calls:

    rte_mempool_mp_put_bulk(mp, obj_table, n)
    rte_mempool_sp_put_bulk(mp, obj_table, n)
    rte_mempool_mp_put(mp, obj)
    rte_mempool_sp_put(mp, obj)
    rte_mempool_mc_get_bulk(mp, obj_table, n)
    rte_mempool_sc_get_bulk(mp, obj_table, n)
    rte_mempool_mc_get(mp, obj_p)
    rte_mempool_sc_get(mp, obj_p)

The cookie checks are also done in a single place now. (A short usage
sketch of the new generic calls appears after the diff below.)

Signed-off-by: Lazaros Koromilas
Acked-by: Olivier Matz
---

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 31582d825c..55c2cbc258 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -338,7 +338,7 @@ static int test_mempool_single_producer(void)
 			printf("obj not owned by this mempool\n");
 			RET_ERR();
 		}
-		rte_mempool_sp_put(mp_spsc, obj);
+		rte_mempool_put(mp_spsc, obj);
 		rte_spinlock_lock(&scsp_spinlock);
 		scsp_obj_table[i] = NULL;
 		rte_spinlock_unlock(&scsp_spinlock);
@@ -371,7 +371,7 @@ static int test_mempool_single_consumer(void)
 		rte_spinlock_unlock(&scsp_spinlock);
 		if (i >= MAX_KEEP)
 			continue;
-		if (rte_mempool_sc_get(mp_spsc, &obj) < 0)
+		if (rte_mempool_get(mp_spsc, &obj) < 0)
 			break;
 		rte_spinlock_lock(&scsp_spinlock);
 		scsp_obj_table[i] = obj;
@@ -477,13 +477,13 @@ test_mempool_basic_ex(struct rte_mempool *mp)
 	}
 
 	for (i = 0; i < MEMPOOL_SIZE; i ++) {
-		if (rte_mempool_mc_get(mp, &obj[i]) < 0) {
+		if (rte_mempool_get(mp, &obj[i]) < 0) {
 			printf("test_mp_basic_ex fail to get object for [%u]\n",
 				i);
 			goto fail_mp_basic_ex;
 		}
 	}
-	if (rte_mempool_mc_get(mp, &err_obj) == 0) {
+	if (rte_mempool_get(mp, &err_obj) == 0) {
 		printf("test_mempool_basic_ex get an impossible obj\n");
 		goto fail_mp_basic_ex;
 	}
@@ -494,7 +494,7 @@ test_mempool_basic_ex(struct rte_mempool *mp)
 	}
 
 	for (i = 0; i < MEMPOOL_SIZE; i++)
-		rte_mempool_mp_put(mp, obj[i]);
+		rte_mempool_put(mp, obj[i]);
 
 	if (rte_mempool_full(mp) != 1) {
 		printf("test_mempool_basic_ex the mempool should be full\n");
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 80f873de66..8bc33590c9 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -38,3 +38,7 @@ Deprecation Notices
   are respectively replaced by PKT_RX_VLAN_STRIPPED and PKT_RX_QINQ_STRIPPED,
   that are better described. The old flags and their behavior will be kept in
   16.07 and will be removed in 16.11.
+
+* The mempool functions for single/multi producer/consumer are deprecated and
+  will be removed in 16.11.
+  It is replaced by rte_mempool_generic_get/put functions.
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 0a1777c163..f39ad5c90d 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -957,8 +957,8 @@ void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
  *   Mono-producer (0) or multi-producers (1).
  */
 static inline void __attribute__((always_inline))
-__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
-		   unsigned n, int is_mp)
+__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+		      unsigned n, int is_mp)
 {
 	struct rte_mempool_cache *cache;
 	uint32_t index;
@@ -1016,6 +1016,27 @@ ring_enqueue:
 }
 
 /**
+ * Put several objects back in the mempool.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the mempool from the obj_table.
+ * @param is_mp
+ *   Mono-producer (0) or multi-producers (1).
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+			unsigned n, int is_mp)
+{
+	__mempool_check_cookies(mp, obj_table, n, 0);
+	__mempool_generic_put(mp, obj_table, n, is_mp);
+}
+
+/**
+ * @deprecated
  * Put several objects back in the mempool (multi-producers safe).
  *
  * @param mp
@@ -1025,15 +1046,16 @@ ring_enqueue:
  * @param n
  *   The number of objects to add in the mempool from the obj_table.
  */
+__rte_deprecated
 static inline void __attribute__((always_inline))
 rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 			unsigned n)
 {
-	__mempool_check_cookies(mp, obj_table, n, 0);
-	__mempool_put_bulk(mp, obj_table, n, 1);
+	rte_mempool_generic_put(mp, obj_table, n, 1);
 }
 
 /**
+ * @deprecated
  * Put several objects back in the mempool (NOT multi-producers safe).
  *
  * @param mp
@@ -1043,12 +1065,12 @@ rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
  * @param n
  *   The number of objects to add in the mempool from obj_table.
  */
-static inline void
+__rte_deprecated
+static inline void __attribute__((always_inline))
 rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 			unsigned n)
 {
-	__mempool_check_cookies(mp, obj_table, n, 0);
-	__mempool_put_bulk(mp, obj_table, n, 0);
+	rte_mempool_generic_put(mp, obj_table, n, 0);
 }
 
 /**
@@ -1069,11 +1091,12 @@ static inline void __attribute__((always_inline))
 rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 		     unsigned n)
 {
-	__mempool_check_cookies(mp, obj_table, n, 0);
-	__mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
+	rte_mempool_generic_put(mp, obj_table, n,
+				!(mp->flags & MEMPOOL_F_SP_PUT));
 }
 
 /**
+ * @deprecated
  * Put one object in the mempool (multi-producers safe).
  *
  * @param mp
@@ -1081,13 +1104,15 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
  * @param obj
  *   A pointer to the object to be added.
  */
+__rte_deprecated
 static inline void __attribute__((always_inline))
 rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
 {
-	rte_mempool_mp_put_bulk(mp, &obj, 1);
+	rte_mempool_generic_put(mp, &obj, 1, 1);
 }
 
 /**
+ * @deprecated
  * Put one object back in the mempool (NOT multi-producers safe).
  *
  * @param mp
@@ -1095,10 +1120,11 @@ rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
  * @param obj
  *   A pointer to the object to be added.
  */
+__rte_deprecated
 static inline void __attribute__((always_inline))
 rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
 {
-	rte_mempool_sp_put_bulk(mp, &obj, 1);
+	rte_mempool_generic_put(mp, &obj, 1, 0);
 }
 
 /**
@@ -1134,8 +1160,8 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
  *   - <0: Error; code of ring dequeue function.
  */
 static inline int __attribute__((always_inline))
-__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
-		   unsigned n, int is_mc)
+__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
+		      unsigned n, int is_mc)
 {
 	int ret;
 	struct rte_mempool_cache *cache;
@@ -1197,7 +1223,7 @@ ring_dequeue:
 }
 
 /**
- * Get several objects from the mempool (multi-consumers safe).
+ * Get several objects from the mempool.
  *
  * If cache is enabled, objects will be retrieved first from cache,
  * subsequently from the common pool. Note that it can return -ENOENT when
@@ -1210,21 +1236,51 @@ ring_dequeue:
  *   A pointer to a table of void * pointers (objects) that will be filled.
  * @param n
  *   The number of objects to get from mempool to obj_table.
+ * @param is_mc
+ *   Mono-consumer (0) or multi-consumers (1).
  * @return
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
 static inline int __attribute__((always_inline))
-rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
+			int is_mc)
 {
 	int ret;
-	ret = __mempool_get_bulk(mp, obj_table, n, 1);
+	ret = __mempool_generic_get(mp, obj_table, n, is_mc);
 	if (ret == 0)
 		__mempool_check_cookies(mp, obj_table, n, 1);
 	return ret;
 }
 
 /**
+ * @deprecated
+ * Get several objects from the mempool (multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if cache from other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to get from mempool to obj_table.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+__rte_deprecated
+static inline int __attribute__((always_inline))
+rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+	return rte_mempool_generic_get(mp, obj_table, n, 1);
+}
+
+/**
+ * @deprecated
  * Get several objects from the mempool (NOT multi-consumers safe).
  *
  * If cache is enabled, objects will be retrieved first from cache,
@@ -1243,14 +1299,11 @@ rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - -ENOENT: Not enough entries in the mempool; no object is
  *     retrieved.
  */
+__rte_deprecated
 static inline int __attribute__((always_inline))
 rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
-	int ret;
-	ret = __mempool_get_bulk(mp, obj_table, n, 0);
-	if (ret == 0)
-		__mempool_check_cookies(mp, obj_table, n, 1);
-	return ret;
+	return rte_mempool_generic_get(mp, obj_table, n, 0);
 }
 
 /**
@@ -1278,15 +1331,12 @@ rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 static inline int __attribute__((always_inline))
 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
-	int ret;
-	ret = __mempool_get_bulk(mp, obj_table, n,
-				 !(mp->flags & MEMPOOL_F_SC_GET));
-	if (ret == 0)
-		__mempool_check_cookies(mp, obj_table, n, 1);
-	return ret;
+	return rte_mempool_generic_get(mp, obj_table, n,
+				       !(mp->flags & MEMPOOL_F_SC_GET));
 }
 
 /**
+ * @deprecated
  * Get one object from the mempool (multi-consumers safe).
  *
  * If cache is enabled, objects will be retrieved first from cache,
@@ -1302,13 +1352,15 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
+__rte_deprecated
 static inline int __attribute__((always_inline))
 rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
 {
-	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
+	return rte_mempool_generic_get(mp, obj_p, 1, 1);
 }
 
 /**
+ * @deprecated
  * Get one object from the mempool (NOT multi-consumers safe).
  *
  * If cache is enabled, objects will be retrieved first from cache,
@@ -1324,10 +1376,11 @@ rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
+__rte_deprecated
 static inline int __attribute__((always_inline))
 rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
 {
-	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
+	return rte_mempool_generic_get(mp, obj_p, 1, 0);
 }
 
 /**
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index 9bcbf17c29..6d4fc4a70e 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -22,6 +22,8 @@ DPDK_16.07 {
 	rte_mempool_check_cookies;
 	rte_mempool_create_empty;
 	rte_mempool_free;
+	rte_mempool_generic_get;
+	rte_mempool_generic_put;
 	rte_mempool_mem_iter;
 	rte_mempool_obj_iter;
 	rte_mempool_ops_table;
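
Usage sketch (not part of the patch): the snippet below shows how a caller of
the deprecated multi-consumer/multi-producer bulk calls would switch to the
generic API introduced above. The helper name process_burst and its
surrounding setup are illustrative assumptions; only rte_mempool_generic_get,
rte_mempool_generic_put and their is_mc/is_mp parameters come from the change
itself.

#include <rte_mempool.h>

/* Illustrative helper: fetch a burst of objects, use them, return them. */
static void
process_burst(struct rte_mempool *mp, void **objs, unsigned n)
{
	/* Before: rte_mempool_mc_get_bulk(mp, objs, n); */
	if (rte_mempool_generic_get(mp, objs, n, 1) < 0)	/* is_mc = 1 */
		return;	/* -ENOENT: not enough objects available */

	/* ... work on the n objects ... */

	/* Before: rte_mempool_mp_put_bulk(mp, objs, n); */
	rte_mempool_generic_put(mp, objs, n, 1);		/* is_mp = 1 */
}

Callers that do not need to force a producer/consumer mode can keep using
rte_mempool_get_bulk()/rte_mempool_put_bulk(), which now forward to the
generic functions based on the MEMPOOL_F_SC_GET/MEMPOOL_F_SP_PUT pool flags.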