/*-
* BSD LICENSE
- *
+ *
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/**
 * The RTE mempool structure.
 */
struct rte_mempool {
-	TAILQ_ENTRY(rte_mempool) next;   /**< Next in list. */
-
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	struct rte_ring *ring;           /**< Ring to store objects. */
	phys_addr_t phys_addr;           /**< Phys. addr. of mempool struct. */
* Number to add to the object-oriented statistics.
*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
-		unsigned __lcore_id = rte_lcore_id();		\
-		mp->stats[__lcore_id].name##_objs += n;		\
-		mp->stats[__lcore_id].name##_bulk += 1;		\
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
+		unsigned __lcore_id = rte_lcore_id();		\
+		if (__lcore_id < RTE_MAX_LCORE) {		\
+			mp->stats[__lcore_id].name##_objs += n;	\
+			mp->stats[__lcore_id].name##_bulk += 1;	\
+		}						\
	} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
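/*
 * Illustrative sketch (not part of the patch): rte_lcore_id() returns
 * an out-of-range value (LCORE_ID_ANY) on threads the EAL did not
 * create, so indexing mp->stats[] with it would write out of bounds.
 * The new bound check above skips the statistics for such threads; a
 * caller-side equivalent of the same guard:
 */
#include <rte_lcore.h>

static inline int
example_has_stats_slot(void)
{
	/* Only EAL lcores own a per-lcore stats slot. */
	return rte_lcore_id() < RTE_MAX_LCORE;
}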
* Calculates the size of the mempool header.
* @param mp
* Pointer to the memory pool.
- * @param pgn
+ * @param pgn
* Number of pages used to store mempool objects.
*/
#define MEMPOOL_HEADER_SIZE(mp, pgn) (sizeof(*(mp)) + \
	RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
-	sizeof ((mp)->elt_pa[0]), CACHE_LINE_SIZE))
+	sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
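/*
 * Worked example (illustrative; every size below is an assumption, not
 * taken from the header): MEMPOOL_HEADER_SIZE() appends the page
 * addresses that do not fit in the elt_pa[] slots already inside the
 * struct, padded up to a cache line. A standalone re-computation with
 * stand-in macros:
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define EX_CACHE_LINE_SIZE 64
#define EX_ALIGN_CEIL(v, a) ((((v) + (a) - 1) / (a)) * (a))

int main(void)
{
	size_t struct_size = 1536; /* assumed sizeof(struct rte_mempool) */
	size_t pgn = 5;            /* pages backing the objects */
	size_t elt_pa_dim = 1;     /* assumed RTE_DIM((mp)->elt_pa) */

	/* 4 extra 8-byte page addresses = 32 bytes, padded to 64. */
	size_t tail = EX_ALIGN_CEIL((pgn - elt_pa_dim) * sizeof(uint64_t),
				    EX_CACHE_LINE_SIZE);
	printf("header size = %zu\n", struct_size + tail); /* 1536 + 64 */
	return 0;
}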
/**
* Returns TRUE if the whole mempool is allocated in one contiguous block of memory.
*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#ifndef __INTEL_COMPILER
-#pragma GCC push_options
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
static inline void __mempool_check_cookies(const struct rte_mempool *mp,
		if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
			rte_log_set_history(0);
			RTE_LOG(CRIT, MEMPOOL,
-				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-				obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+				obj, (const void *) mp, cookie);
			rte_panic("MEMPOOL: bad header cookie (put)\n");
		}
		__mempool_write_header_cookie(obj, 1);
		if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
			rte_log_set_history(0);
			RTE_LOG(CRIT, MEMPOOL,
-				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-				obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+				obj, (const void *) mp, cookie);
			rte_panic("MEMPOOL: bad header cookie (get)\n");
		}
		__mempool_write_header_cookie(obj, 0);
		    cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
			rte_log_set_history(0);
			RTE_LOG(CRIT, MEMPOOL,
-				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-				obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+				obj, (const void *) mp, cookie);
			rte_panic("MEMPOOL: bad header cookie (audit)\n");
		}
	}
		if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
			rte_log_set_history(0);
			RTE_LOG(CRIT, MEMPOOL,
-				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-				obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+				obj, (const void *) mp, cookie);
			rte_panic("MEMPOOL: bad trailer cookie\n");
		}
	}
}
#ifndef __INTEL_COMPILER
-#pragma GCC pop_options
+#pragma GCC diagnostic error "-Wcast-qual"
#endif
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
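/*
 * Usage sketch (illustrative, not part of the header): with
 * RTE_LIBRTE_MEMPOOL_DEBUG enabled, the header/trailer cookies checked
 * above make double frees and buffer overruns panic on get/put. An
 * application can run the same checks on demand over a whole pool:
 */
#include <rte_mempool.h>

static void
example_audit(const struct rte_mempool *mp)
{
	/* Walks the pool and validates every object's cookies; the
	 * cookie validation is a no-op when debug is compiled out. */
	rte_mempool_audit(mp);
}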
* with rte_errno set appropriately. Possible rte_errno values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
- * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
* - EINVAL - cache size provided is too large
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
* with rte_errno set appropriately. Possible rte_errno values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
- * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
* - EINVAL - cache size provided is too large
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
* with rte_errno set appropriately. Possible rte_errno values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
- * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
* - EINVAL - cache size provided is too large
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
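/*
 * Minimal creation sketch (illustrative; the element count, element
 * size and cache size are assumptions, not values from this header).
 * Each of the create functions documented above returns NULL on
 * failure and sets rte_errno as listed:
 */
#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>

static struct rte_mempool *
example_create_pool(void)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create("example_pool",
				8191,            /* number of elements */
				2048,            /* element size in bytes */
				256,             /* per-lcore cache size */
				0,               /* private data size */
				NULL, NULL,      /* pool init func/arg */
				NULL, NULL,      /* object init func/arg */
				rte_socket_id(), /* NUMA socket */
				0);              /* flags */
	if (mp == NULL)
		RTE_LOG(ERR, USER1, "mempool creation failed: %d\n",
			rte_errno);
	return mp;
}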
	__MEMPOOL_STAT_ADD(mp, put, n);
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
-	/* cache is not enabled or single producer */
-	if (unlikely(cache_size == 0 || is_mp == 0))
+	/* cache is not enabled or single producer or non-EAL thread */
+	if (unlikely(cache_size == 0 || is_mp == 0 ||
+		     lcore_id >= RTE_MAX_LCORE))
		goto ring_enqueue;
	/* Go straight to ring if put would overflow mem allocated for cache */
		   unsigned n, int is_mc)
{
	int ret;
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-	unsigned n_orig = n;
-#endif
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	struct rte_mempool_cache *cache;
	uint32_t index, len;
	uint32_t cache_size = mp->cache_size;
	/* cache is not enabled or single consumer */
-	if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
+	if (unlikely(cache_size == 0 || is_mc == 0 ||
+		     n >= cache_size || lcore_id >= RTE_MAX_LCORE))
		goto ring_dequeue;
	cache = &mp->local_cache[lcore_id];
	cache->len -= n;
-	__MEMPOOL_STAT_ADD(mp, get_success, n_orig);
+	__MEMPOOL_STAT_ADD(mp, get_success, n);
	return 0;
	ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
	if (ret < 0)
-		__MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
+		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
-		__MEMPOOL_STAT_ADD(mp, get_success, n_orig);
+		__MEMPOOL_STAT_ADD(mp, get_success, n);
	return ret;
}
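/*
 * Usage sketch (illustrative, not part of the header): the internal
 * bulk helpers above back the public rte_mempool_get_bulk()/
 * rte_mempool_put_bulk() API; a typical caller looks like this (the
 * burst size is an assumption):
 */
#include <rte_mempool.h>

#define EXAMPLE_BURST 32

static void
example_get_put(struct rte_mempool *mp)
{
	void *objs[EXAMPLE_BURST];

	/* All-or-nothing bulk dequeue: returns 0 on success, a
	 * negative errno-style value when the pool runs dry. */
	if (rte_mempool_get_bulk(mp, objs, EXAMPLE_BURST) < 0)
		return;

	/* ... use the objects ... */

	/* Return every object to the pool; a put cannot fail. */
	rte_mempool_put_bulk(mp, objs, EXAMPLE_BURST);
}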