return;
}
- if (!strcmp(res->set, "set_quota")) {
- ret = rte_ring_set_bulk_count(r, res->value);
- if (ret != 0)
- cmdline_printf(cl, "Cannot set quota\n");
- }
- else if (!strcmp(res->set, "set_watermark")) {
+ if (!strcmp(res->set, "set_watermark")) {
ret = rte_ring_set_water_mark(r, res->value);
if (ret != 0)
cmdline_printf(cl, "Cannot set water mark\n");
cmdline_parse_token_string_t cmd_set_ring_set =
TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, set,
- "set_quota#set_watermark");
+ "set_watermark");
cmdline_parse_token_string_t cmd_set_ring_name =
TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, name, NULL);
cmdline_parse_inst_t cmd_set_ring = {
.f = cmd_set_ring_parsed, /* function to call */
.data = NULL, /* 2nd arg of func */
- .help_str = "set quota/watermark: "
- "set_quota|set_watermark <ring_name> <value>",
+ .help_str = "set watermark: "
+ "set_watermark <ring_name> <value>",
.tokens = { /* token list, NULL terminated */
(void *)&cmd_set_ring_set,
(void *)&cmd_set_ring_name,
}
wait(&status);
printf("Child process status: %d\n", status);
+#ifndef RTE_EAL_ALWAYS_PANIC_ON_ERROR
if(!WIFEXITED(status) || WEXITSTATUS(status) != (uint8_t)exit_val){
- printf("Child process terminated with incorrect return code!\n");
+ printf("Child process terminated with incorrect status (expected = %d)!\n",
+ exit_val);
return -1;
}
-
+#endif
return 0;
}
* a duplicate error number that conflicts with errno.h */
rte_snprintf(expected_libc_retval, sizeof(expected_libc_retval),
unknown_code_result, rte_errs[i]);
- if (strcmp(expected_libc_retval, libc_retval) != 0){
+ if ((strcmp(expected_libc_retval, libc_retval) != 0) &&
+ (strcmp("", libc_retval) != 0)){
printf("Error, duplicate error code %d\n", rte_errs[i]);
return -1;
}
rte_retval, libc_retval);
if ((strcmp(rte_retval, libc_retval) != 0) ||
(strcmp(expected_libc_retval, libc_retval) != 0)){
- printf("Failed test for RTE_MAX_ERRNO + 1 value\n");
- return -1;
+ if (strcmp("", libc_retval) != 0){
+ printf("Failed test for RTE_MAX_ERRNO + 1 value\n");
+ return -1;
+ }
}
return 0;
#include "test.h"
-#define ITERATIONS (1 << 20)
-#define BATCH_SIZE (1 << 13)
-
#define TEST_LPM_ASSERT(cond) do { \
if (!(cond)) { \
printf("Error at line %d: \n", __LINE__); \
} \
} while(0)
-
-
typedef int32_t (* rte_lpm_test)(void);
static int32_t test0(void);
static int32_t test15(void);
static int32_t test16(void);
static int32_t test17(void);
-static int32_t test18(void);
+static int32_t perf_test(void);
rte_lpm_test tests[] = {
/* Test Cases */
test15,
test16,
test17,
- test18
+ perf_test,
};
#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
return PASS;
}
-/* TEST test15
- *
- * Lookup performance test using Mae West Routing Table
- */
-static inline uint32_t
-depth_to_mask(uint8_t depth) {
- return (int)0x80000000 >> (depth - 1);
-}
-
-static uint32_t
-rule_table_check_for_duplicates(const struct route_rule *table, uint32_t n){
- unsigned i, j, count;
-
- count = 0;
- for (i = 0; i < (n - 1); i++) {
- uint8_t depth1 = table[i].depth;
- uint32_t ip1_masked = table[i].ip & depth_to_mask(depth1);
-
- for (j = (i + 1); j <n; j ++) {
- uint8_t depth2 = table[j].depth;
- uint32_t ip2_masked = table[j].ip &
- depth_to_mask(depth2);
-
- if ((depth1 == depth2) && (ip1_masked == ip2_masked)){
- printf("Rule %u is a duplicate of rule %u\n",
- j, i);
- count ++;
- }
- }
- }
-
- return count;
-}
-
-static int32_t
-rule_table_characterisation(const struct route_rule *table, uint32_t n){
- unsigned i, j;
-
- printf("DEPTH QUANTITY (PERCENT)\n");
- printf("--------------------------------- \n");
- /* Count depths. */
- for(i = 1; i <= 32; i++) {
- unsigned depth_counter = 0;
- double percent_hits;
-
- for (j = 0; j < n; j++) {
- if (table[j].depth == (uint8_t) i)
- depth_counter++;
- }
-
- percent_hits = ((double)depth_counter)/((double)n) * 100;
-
- printf("%u - %5u (%.2f)\n",
- i, depth_counter, percent_hits);
- }
-
- return 0;
-}
-
-static inline uint64_t
-div64(uint64_t dividend, uint64_t divisor)
-{
- return ((2 * dividend) + divisor) / (2 * divisor);
-}
-
-int32_t
-test15(void)
-{
- struct rte_lpm *lpm = NULL;
- uint64_t begin, end, total_time, lpm_used_entries = 0;
- unsigned avg_ticks, i, j;
- uint8_t next_hop_add = 0, next_hop_return = 0;
- int32_t status = 0;
-
- printf("Using Mae West routing table from www.oiforum.com\n");
- printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
- printf("No. duplicate routes = %u\n\n", (unsigned)
- rule_table_check_for_duplicates(mae_west_tbl, NUM_ROUTE_ENTRIES));
- printf("Route distribution per prefix width: \n");
- rule_table_characterisation(mae_west_tbl,
- (uint32_t) NUM_ROUTE_ENTRIES);
- printf("\n");
-
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000,
- RTE_LPM_MEMZONE);
- TEST_LPM_ASSERT(lpm != NULL);
-
- next_hop_add = 1;
-
- /* Add */
- /* Begin Timer. */
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- /* rte_lpm_add(lpm, ip, depth, next_hop_add) */
- status += rte_lpm_add(lpm, mae_west_tbl[i].ip,
- mae_west_tbl[i].depth, next_hop_add);
- }
- /* End Timer. */
- end = rte_rdtsc();
-
- TEST_LPM_ASSERT(status == 0);
-
- /* Calculate average cycles per add. */
- avg_ticks = (uint32_t) div64((end - begin),
- (uint64_t) NUM_ROUTE_ENTRIES);
-
- uint64_t cache_line_counter = 0;
- uint64_t count = 0;
-
- /* Obtain add statistics. */
- for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
- if (lpm->tbl24[i].valid)
- lpm_used_entries++;
-
- if (i % 32 == 0){
- if (count < lpm_used_entries) {
- cache_line_counter++;
- count = lpm_used_entries;
- }
- }
- }
-
- printf("Number of table 24 entries = %u\n",
- (unsigned) RTE_LPM_TBL24_NUM_ENTRIES);
- printf("Used table 24 entries = %u\n",
- (unsigned) lpm_used_entries);
- printf("Percentage of table 24 entries used = %u\n",
- (unsigned) div64((lpm_used_entries * 100) ,
- RTE_LPM_TBL24_NUM_ENTRIES));
- printf("64 byte Cache entries used = %u \n",
- (unsigned) cache_line_counter);
- printf("Cache Required = %u bytes\n\n",
- (unsigned) cache_line_counter * 64);
-
- printf("Average LPM Add: %u cycles\n", avg_ticks);
-
- /* Lookup */
-
- /* Choose random seed. */
- rte_srand(0);
- total_time = 0;
- status = 0;
- for (i = 0; i < (ITERATIONS / BATCH_SIZE); i ++) {
- static uint32_t ip_batch[BATCH_SIZE];
- uint64_t begin_batch, end_batch;
-
- /* Generate a batch of random numbers */
- for (j = 0; j < BATCH_SIZE; j ++) {
- ip_batch[j] = rte_rand();
- }
-
- /* Lookup per batch */
- begin_batch = rte_rdtsc();
-
- for (j = 0; j < BATCH_SIZE; j ++) {
- status += rte_lpm_lookup(lpm, ip_batch[j],
- &next_hop_return);
- }
-
- end_batch = rte_rdtsc();
- printf("status = %d\r", next_hop_return);
- TEST_LPM_ASSERT(status < 1);
-
- /* Accumulate batch time */
- total_time += (end_batch - begin_batch);
-
- TEST_LPM_ASSERT((status < -ENOENT) ||
- (next_hop_return == next_hop_add));
- }
-
- avg_ticks = (uint32_t) div64(total_time, ITERATIONS);
- printf("Average LPM Lookup: %u cycles\n", avg_ticks);
-
- /* Delete */
- status = 0;
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- /* rte_lpm_delete(lpm, ip, depth) */
- status += rte_lpm_delete(lpm, mae_west_tbl[i].ip,
- mae_west_tbl[i].depth);
- }
-
- end = rte_rdtsc();
-
- TEST_LPM_ASSERT(status == 0);
-
- avg_ticks = (uint32_t) div64((end - begin), NUM_ROUTE_ENTRIES);
-
- printf("Average LPM Delete: %u cycles\n", avg_ticks);
-
- rte_lpm_delete_all(lpm);
- rte_lpm_free(lpm);
-
- return PASS;
-}
-
-
-
/*
- * Sequence of operations for find existing fbk hash table
+ * Sequence of operations for find existing lpm table
*
* - create table
* - find existing table: hit
* - find non-existing table: miss
*
*/
-int32_t test16(void)
+int32_t
+test15(void)
{
struct rte_lpm *lpm = NULL, *result = NULL;
* test failure condition of overloading the tbl8 so no more will fit
* Check we get an error return value in that case
*/
-static int32_t
-test17(void)
+int32_t
+test16(void)
{
uint32_t ip;
struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
- 256 * 32, RTE_LPM_HEAP);
+ 256 * 32, 0);
- printf("Testing filling tbl8's\n");
-
- /* ip loops through all positibilities for top 24 bits of address */
+ /* ip loops through all possibilities for top 24 bits of address */
for (ip = 0; ip < 0xFFFFFF; ip++){
- /* add an entrey within a different tbl8 each time, since
+ /* add an entry within a different tbl8 each time, since
* depth >24 and the top 24 bits are different */
if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
break;
}
/*
- * Test 18
* Test for overwriting of tbl8:
* - add rule /32 and lookup
* - add new rule /24 and lookup
* - lookup /32 and /24 rule to ensure the table has not been overwritten.
*/
int32_t
-test18(void)
+test17(void)
{
struct rte_lpm *lpm = NULL;
const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
uint8_t next_hop_return = 0;
int32_t status = 0;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
TEST_LPM_ASSERT(lpm != NULL);
- status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32, next_hop_ip_10_32);
- TEST_LPM_ASSERT(status == 0);
+ if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
+ next_hop_ip_10_32)) < 0)
+ return -1;
status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
+ uint8_t test_hop_10_32 = next_hop_return;
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
- status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24, next_hop_ip_10_24);
- TEST_LPM_ASSERT(status == 0);
+ if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
+ next_hop_ip_10_24)) < 0)
+ return -1;
status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
+ uint8_t test_hop_10_24 = next_hop_return;
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
- status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25, next_hop_ip_20_25);
- TEST_LPM_ASSERT(status == 0);
+ if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
+ next_hop_ip_20_25)) < 0)
+ return -1;
status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
+ uint8_t test_hop_20_25 = next_hop_return;
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
+ if (test_hop_10_32 == test_hop_10_24) {
+ printf("Next hop return equal\n");
+ return -1;
+ }
+
+ if (test_hop_10_24 == test_hop_20_25){
+ printf("Next hop return equal\n");
+ return -1;
+ }
+
status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
rte_lpm_free(lpm);
- printf("%s PASSED\n", __func__);
return PASS;
}
-
/*
- * Do all unit and performance tests.
+ * Lookup performance test
*/
-int
-test_lpm(void)
+#define ITERATIONS (1 << 10)
+#define BATCH_SIZE (1 << 12)
+#define BULK_SIZE 32
+
+static void
+print_route_distribution(const struct route_rule *table, uint32_t n)
{
- unsigned test_num;
- int status, global_status;
+ unsigned i, j;
- printf("Running LPM tests...\n"
- "Total number of test = %u\n", (unsigned) NUM_LPM_TESTS);
+ printf("Route distribution per prefix width: \n");
+ printf("DEPTH QUANTITY (PERCENT)\n");
+ printf("--------------------------- \n");
- global_status = 0;
+ /* Count depths. */
+ for(i = 1; i <= 32; i++) {
+ unsigned depth_counter = 0;
+ double percent_hits;
- for (test_num = 0; test_num < NUM_LPM_TESTS; test_num++) {
+ for (j = 0; j < n; j++)
+ if (table[j].depth == (uint8_t) i)
+ depth_counter++;
- status = tests[test_num]();
+ percent_hits = ((double)depth_counter)/((double)n) * 100;
+ printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
+ }
+ printf("\n");
+}
+
+int32_t
+perf_test(void)
+{
+ struct rte_lpm *lpm = NULL;
+ uint64_t begin, total_time, lpm_used_entries = 0;
+ unsigned i, j;
+ uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+ int status = 0;
+ uint64_t cache_line_counter = 0;
+ int64_t count = 0;
+
+ rte_srand(rte_rdtsc());
+
+ printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
+
+ print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Measure add. */
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+ if (rte_lpm_add(lpm, large_route_table[i].ip,
+ large_route_table[i].depth, next_hop_add) == 0)
+ status++;
+ }
+ /* End Timer. */
+ total_time = rte_rdtsc() - begin;
- printf("LPM Test %u: %s\n", test_num,
- (status < 0) ? "FAIL" : "PASS");
+ printf("Unique added entries = %d\n", status);
+ /* Obtain add statistics. */
+ for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
+ if (lpm->tbl24[i].valid)
+ lpm_used_entries++;
+
+ if (i % 32 == 0){
+ if ((uint64_t)count < lpm_used_entries) {
+ cache_line_counter++;
+ count = lpm_used_entries;
+ }
+ }
+ }
+
+ printf("Used table 24 entries = %u (%g%%)\n",
+ (unsigned) lpm_used_entries,
+ (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
+ printf("64 byte Cache entries used = %u (%u bytes)\n",
+ (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
+
+ printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);
+
+ /* Measure single Lookup */
+ total_time = 0;
+ count = 0;
+
+ for (i = 0; i < ITERATIONS; i ++) {
+ static uint32_t ip_batch[BATCH_SIZE];
+
+ for (j = 0; j < BATCH_SIZE; j ++)
+ ip_batch[j] = rte_rand();
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+
+ for (j = 0; j < BATCH_SIZE; j ++) {
+ if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+
+ }
+ printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Measure bulk Lookup */
+ total_time = 0;
+ count = 0;
+ for (i = 0; i < ITERATIONS; i ++) {
+ static uint32_t ip_batch[BATCH_SIZE];
+ uint16_t next_hops[BULK_SIZE];
+
+ /* Create array of random IP addresses */
+ for (j = 0; j < BATCH_SIZE; j ++)
+ ip_batch[j] = rte_rand();
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+ for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
+ unsigned k;
+ rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
+ for (k = 0; k < BULK_SIZE; k++)
+ if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+ }
+ printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Delete */
+ status = 0;
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+ /* rte_lpm_delete(lpm, ip, depth) */
+ status += rte_lpm_delete(lpm, large_route_table[i].ip,
+ large_route_table[i].depth);
+ }
+
+ total_time += rte_rdtsc() - begin;
+
+ printf("Average LPM Delete: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
+
+ rte_lpm_delete_all(lpm);
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+/*
+ * Do all unit and performance tests.
+ */
+
+int
+test_lpm(void)
+{
+ unsigned i;
+ int status, global_status = 0;
+ for (i = 0; i < NUM_LPM_TESTS; i++) {
+ status = tests[i]();
if (status < 0) {
+ printf("ERROR: LPM Test %s: FAIL\n", RTE_STR(tests[i]));
global_status = status;
}
}
}
for(j = 0; j < 1000 ; j++) {
if( *(char *)p1 != 0) {
- printf("rte_zmalloc didn't zeroed"
+			printf("rte_zmalloc didn't zero "
"the allocated memory\n");
ret = -1;
}
}
for(j = 0; j < 1000 ; j++) {
if( *(char *)p1 != 0) {
- printf("rte_zmalloc didn't zeroed"
+			printf("rte_zmalloc didn't zero "
"the allocated memory\n");
ret = -1;
}
return ret;
}
-
/* test function inside the malloc lib*/
static int
test_str_to_size(void)
const char *str;
uint64_t value;
} test_values[] =
- {{ "5G", (uint64_t)5 * 1024 * 1024 *1024 },
- {"0x20g", (uint64_t)0x20 * 1024 * 1024 *1024},
- {"10M", 10 * 1024 * 1024},
- {"050m", 050 * 1024 * 1024},
- {"8K", 8 * 1024},
- {"15k", 15 * 1024},
- {"0200", 0200},
- {"0x103", 0x103},
- {"432", 432},
- {"-1", 0}, /* negative values return 0 */
- {" -2", 0},
- {" -3MB", 0},
- {"18446744073709551616", 0} /* ULLONG_MAX + 1 == out of range*/
- };
+ {{ "5G", (uint64_t)5 * 1024 * 1024 *1024 },
+ {"0x20g", (uint64_t)0x20 * 1024 * 1024 *1024},
+ {"10M", 10 * 1024 * 1024},
+ {"050m", 050 * 1024 * 1024},
+ {"8K", 8 * 1024},
+ {"15k", 15 * 1024},
+ {"0200", 0200},
+ {"0x103", 0x103},
+ {"432", 432},
+ {"-1", 0}, /* negative values return 0 */
+ {" -2", 0},
+ {" -3MB", 0},
+ {"18446744073709551616", 0} /* ULLONG_MAX + 1 == out of range*/
+ };
unsigned i;
for (i = 0; i < sizeof(test_values)/sizeof(test_values[0]); i++)
if (rte_str_to_size(test_values[i].str) != test_values[i].value)
static int
test_big_alloc(void)
{
- void *p1 = rte_malloc("BIG", rte_str_to_size(MALLOC_MEMZONE_SIZE) * 2, 1024);
+ int socket = 0;
+ struct rte_malloc_socket_stats pre_stats, post_stats;
+ size_t size =rte_str_to_size(MALLOC_MEMZONE_SIZE)*2;
+ int align = 0;
+#ifndef RTE_LIBRTE_MALLOC_DEBUG
+ int overhead = 64 + 64;
+#else
+ int overhead = 64 + 64 + 64;
+#endif
+
+ rte_malloc_get_socket_stats(socket, &pre_stats);
+
+ void *p1 = rte_malloc_socket("BIG", size , align, socket);
if (!p1)
return -1;
+ rte_malloc_get_socket_stats(socket,&post_stats);
+
+ /* Check statistics reported are correct */
+ /* Allocation increase, cannot be the same as before big allocation */
+ if (post_stats.heap_totalsz_bytes == pre_stats.heap_totalsz_bytes) {
+		printf("Malloc statistics are incorrect - heap_totalsz_bytes\n");
+ return -1;
+ }
+ /* Check that allocated size adds up correctly */
+ if (post_stats.heap_allocsz_bytes !=
+ pre_stats.heap_allocsz_bytes + size + align + overhead) {
+ printf("Malloc statistics are incorrect - alloc_size\n");
+ return -1;
+ }
+ /* Check free size against tested allocated size */
+ if (post_stats.heap_freesz_bytes !=
+ post_stats.heap_totalsz_bytes - post_stats.heap_allocsz_bytes) {
+ printf("Malloc statistics are incorrect - heap_freesz_bytes\n");
+ return -1;
+ }
+ /* Number of allocated blocks must increase after allocation */
+ if (post_stats.alloc_count != pre_stats.alloc_count + 1) {
+ printf("Malloc statistics are incorrect - alloc_count\n");
+ return -1;
+ }
+ /* New blocks now available - just allocated 1 but also 1 new free */
+ if(post_stats.free_count != pre_stats.free_count ) {
+ printf("Malloc statistics are incorrect - free_count\n");
+ return -1;
+ }
+
rte_free(p1);
return 0;
}
+static int
+test_multi_alloc_statistics(void)
+{
+ int socket = 0;
+ struct rte_malloc_socket_stats pre_stats, post_stats ,first_stats, second_stats;
+ size_t size = 2048;
+ int align = 1024;
+#ifndef RTE_LIBRTE_MALLOC_DEBUG
+ int trailer_size = 0;
+#else
+ int trailer_size = 64;
+#endif
+ int overhead = 64 + trailer_size;
+
+ rte_malloc_get_socket_stats(socket, &pre_stats);
+
+ void *p1 = rte_malloc_socket("stats", size , align, socket);
+ if (!p1)
+ return -1;
+ rte_free(p1);
+ rte_malloc_dump_stats("stats");
+
+ rte_malloc_get_socket_stats(socket,&post_stats);
+ /* Check statistics reported are correct */
+ /* All post stats should be equal to pre stats after alloc freed */
+ if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) &&
+ (post_stats.heap_freesz_bytes!=pre_stats.heap_freesz_bytes) &&
+ (post_stats.heap_allocsz_bytes!=pre_stats.heap_allocsz_bytes)&&
+ (post_stats.alloc_count!=pre_stats.alloc_count)&&
+ (post_stats.free_count!=pre_stats.free_count)) {
+ printf("Malloc statistics are incorrect - freed alloc\n");
+ return -1;
+ }
+ /* Check two consecutive allocations */
+ size = 1024;
+ align = 0;
+ rte_malloc_get_socket_stats(socket,&pre_stats);
+ void *p2 = rte_malloc_socket("add", size ,align, socket);
+ if (!p2)
+ return -1;
+ rte_malloc_get_socket_stats(socket,&first_stats);
+
+ void *p3 = rte_malloc_socket("add2", size,align, socket);
+ if (!p3)
+ return -1;
+ rte_malloc_get_socket_stats(socket,&second_stats);
+
+ /*
+ * Check that no new blocks added after small allocations
+ * i.e. < RTE_MALLOC_MEMZONE_SIZE
+ */
+ if(second_stats.heap_totalsz_bytes != first_stats.heap_totalsz_bytes) {
+ printf("Incorrect heap statistics: Total size \n");
+ return -1;
+ }
+ /* Check allocated size is equal to two additions plus overhead */
+ if(second_stats.heap_allocsz_bytes !=
+ size + overhead + first_stats.heap_allocsz_bytes) {
+ printf("Incorrect heap statistics: Allocated size \n");
+ return -1;
+ }
+ /* Check that allocation count increments correctly i.e. +1 */
+ if (second_stats.alloc_count != first_stats.alloc_count + 1) {
+ printf("Incorrect heap statistics: Allocated count \n");
+ return -1;
+ }
+
+ if (second_stats.free_count != first_stats.free_count){
+ printf("Incorrect heap statistics: Free count \n");
+ return -1;
+ }
+
+ /* 2 Free blocks smaller 11M, larger 11M + (11M - 2048) */
+ if (second_stats.greatest_free_size !=
+ (rte_str_to_size(MALLOC_MEMZONE_SIZE) * 2) -
+ 2048 - trailer_size) {
+ printf("Incorrect heap statistics: Greatest free size \n");
+ return -1;
+ }
+ /* Free size must equal the original free size minus the new allocation*/
+ if (first_stats.heap_freesz_bytes <= second_stats.heap_freesz_bytes) {
+ printf("Incorrect heap statistics: Free size \n");
+ return -1;
+ }
+ rte_free(p2);
+ rte_free(p3);
+ /* After freeing both allocations check stats return to original */
+ rte_malloc_get_socket_stats(socket, &post_stats);
+ if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) &&
+ (post_stats.heap_freesz_bytes!=pre_stats.heap_freesz_bytes) &&
+ (post_stats.heap_allocsz_bytes!=pre_stats.heap_allocsz_bytes)&&
+ (post_stats.alloc_count!=pre_stats.alloc_count)&&
+ (post_stats.free_count!=pre_stats.free_count)) {
+ printf("Malloc statistics are incorrect - freed alloc\n");
+ return -1;
+ }
+ return 0;
+}
+
static int
test_memzone_size_alloc(void)
{
const size_t request_size = 1024;
size_t allocated_size;
char *data_ptr = rte_malloc(NULL, request_size, CACHE_LINE_SIZE);
+#ifdef RTE_LIBRTE_MALLOC_DEBUG
+ int retval;
+ char *over_write_vals = NULL;
+#endif
+
if (data_ptr == NULL) {
printf("%s: %d - Allocation error\n", __func__, __LINE__);
return -1;
err_return();
#ifdef RTE_LIBRTE_MALLOC_DEBUG
- int retval;
- char *over_write_vals = NULL;
/****** change the header to be bad */
char save_buf[64];
return -1;
}
+/* Check if memory is available on a specific socket */
+static int
+is_mem_on_socket(int32_t socket)
+{
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ unsigned i;
+
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ if (socket == ms[i].socket_id)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Find what socket a memory address is on. Only works for addresses within
+ * memsegs, not heap or stack...
+ */
+static int32_t
+addr_to_socket(void * addr)
+{
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ unsigned i;
+
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ if ((ms[i].addr <= addr) &&
+ ((uintptr_t)addr <
+ ((uintptr_t)ms[i].addr + (uintptr_t)ms[i].len)))
+ return ms[i].socket_id;
+ }
+ return -1;
+}
+
+/* Test using rte_[c|m|zm]alloc_socket() on a specific socket */
+static int
+test_alloc_single_socket(int32_t socket)
+{
+ const char *type = NULL;
+ const size_t size = 10;
+ const unsigned align = 0;
+ char *mem = NULL;
+ int32_t desired_socket = (socket == SOCKET_ID_ANY) ?
+ (int32_t)rte_socket_id() : socket;
+
+ /* Test rte_calloc_socket() */
+ mem = rte_calloc_socket(type, size, sizeof(char), align, socket);
+ if (mem == NULL)
+ return -1;
+ if (addr_to_socket(mem) != desired_socket) {
+ rte_free(mem);
+ return -1;
+ }
+ rte_free(mem);
+
+ /* Test rte_malloc_socket() */
+ mem = rte_malloc_socket(type, size, align, socket);
+ if (mem == NULL)
+ return -1;
+ if (addr_to_socket(mem) != desired_socket) {
+ return -1;
+ }
+ rte_free(mem);
+
+ /* Test rte_zmalloc_socket() */
+ mem = rte_zmalloc_socket(type, size, align, socket);
+ if (mem == NULL)
+ return -1;
+ if (addr_to_socket(mem) != desired_socket) {
+ rte_free(mem);
+ return -1;
+ }
+ rte_free(mem);
+
+ return 0;
+}
+
+static int
+test_alloc_socket(void)
+{
+ unsigned socket_count = 0;
+ unsigned i;
+
+ if (test_alloc_single_socket(SOCKET_ID_ANY) < 0)
+ return -1;
+
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+ if (is_mem_on_socket(i)) {
+ socket_count++;
+ if (test_alloc_single_socket(i) < 0) {
+ printf("Fail: rte_malloc_socket(..., %u) did not succeed\n",
+ i);
+ return -1;
+ }
+ }
+ else {
+ if (test_alloc_single_socket(i) == 0) {
+ printf("Fail: rte_malloc_socket(..., %u) succeeded\n",
+ i);
+ return -1;
+ }
+ }
+ }
+
+	/* Print warning if only a single socket, but don't fail the test */
+ if (socket_count < 2) {
+ printf("WARNING: alloc_socket test needs memory on multiple sockets!\n");
+ }
+
+ return 0;
+}
+
int
test_malloc(void)
{
return -1;
}
else printf("test_realloc() passed\n");
-/*----------------------------*/
+
+ /*----------------------------*/
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
rte_eal_remote_launch(test_align_overlap_per_lcore, NULL, lcore_id);
}
return ret;
}
else printf("test_align_overlap_per_lcore() passed\n");
+
/*----------------------------*/
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
rte_eal_remote_launch(test_reordered_free_per_lcore, NULL, lcore_id);
}
else printf("test_rte_malloc_validate() passed\n");
+ ret = test_alloc_socket();
+ if (ret < 0){
+ printf("test_alloc_socket() failed\n");
+ return ret;
+ }
+ else printf("test_alloc_socket() passed\n");
+
+ ret = test_multi_alloc_statistics();
+ if (ret < 0) {
+		printf("test_multi_alloc_statistics() failed\n");
+ return ret;
+ }
+ else
+		printf("test_multi_alloc_statistics() passed\n");
+
return 0;
}
static struct rte_mempool *refcnt_pool = NULL;
static struct rte_ring *refcnt_mbuf_ring = NULL;
static volatile uint32_t refcnt_stop_slaves;
-static uint32_t refcnt_lcore[RTE_MAX_LCORE];
+static unsigned refcnt_lcore[RTE_MAX_LCORE];
#endif
static int
test_refcnt_slave(__attribute__((unused)) void *arg)
{
- uint32_t lcore, free;
- void *mp;
+ unsigned lcore, free;
+ void *mp = 0;
lcore = rte_lcore_id();
printf("%s started at lcore %u\n", __func__, lcore);
}
static void
-test_refcnt_iter(uint32_t lcore, uint32_t iter)
+test_refcnt_iter(unsigned lcore, unsigned iter)
{
uint16_t ref;
- uint32_t i, n, tref, wn;
+ unsigned i, n, tref, wn;
struct rte_mbuf *m;
tref = 0;
static int
test_refcnt_master(void)
{
- uint32_t i, lcore;
+ unsigned i, lcore;
lcore = rte_lcore_id();
printf("%s started at lcore %u\n", __func__, lcore);
{
#if defined RTE_MBUF_SCATTER_GATHER && defined RTE_MBUF_REFCNT_ATOMIC
- uint32_t lnum, master, slave, tref;
+ unsigned lnum, master, slave, tref;
if ((lnum = rte_lcore_count()) == 1) {
#include <cmdline_parse.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
#include <rte_common.h>
#include "test.h"
if (ms[memseg_idx].len < maxlen)
continue;
- len = ms[memseg_idx].len;
- last_addr = ms[memseg_idx].addr;
+ /* align everything */
+ last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
+ len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
+ len &= ~((uint64_t) CACHE_LINE_MASK);
/* cycle through all memzones */
for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
* are allocated sequentially so we don't need to worry about
* them being in the right order.
*/
- len -= (uintptr_t) RTE_PTR_SUB(
+ len -= RTE_PTR_DIFF(
config->mem_config->memzone[memzone_idx].addr,
- (uintptr_t) last_addr);
+ last_addr);
len -= config->mem_config->memzone[memzone_idx].len;
- last_addr =
- RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
+ last_addr = RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
(size_t) config->mem_config->memzone[memzone_idx].len);
}
}
maxlen = len;
}
+ if (maxlen == 0) {
+ printf("There is no space left!\n");
+ return 0;
+ }
+
mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
if (mz == NULL){
printf("Failed to reserve a big chunk of memory\n");
void* last_addr;
uint64_t maxlen = 0;
+ /* random alignment */
+ rte_srand((unsigned)rte_rdtsc());
+ const unsigned align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */
+
/* get pointer to global configuration */
config = rte_eal_get_configuration();
if (ms[memseg_idx].len < maxlen)
continue;
- len = ms[memseg_idx].len;
- last_addr = ms[memseg_idx].addr;
+ /* align everything */
+ last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
+ len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
+ len &= ~((uint64_t) CACHE_LINE_MASK);
/* cycle through all memzones */
for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
/* make sure we get the alignment offset */
if (len > maxlen) {
- addr_offset = RTE_ALIGN_CEIL((uintptr_t) last_addr, 512) - (uintptr_t) last_addr;
+ addr_offset = RTE_PTR_ALIGN_CEIL((uintptr_t) last_addr, align) - (uintptr_t) last_addr;
maxlen = len;
}
}
+ if (maxlen == 0 || maxlen == addr_offset) {
+ printf("There is no space left for biggest %u-aligned memzone!\n", align);
+ return 0;
+ }
+
maxlen -= addr_offset;
mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
- SOCKET_ID_ANY, 0, 512);
+ SOCKET_ID_ANY, 0, align);
if (mz == NULL){
printf("Failed to reserve a big chunk of memory\n");
rte_dump_physmem_layout();
}
if (mz->len != maxlen) {
- printf("Memzone reserve with 0 size and alignment 512 did not return"
- " bigest block\n");
+ printf("Memzone reserve with 0 size and alignment %u did not return"
+			" biggest block\n", align);
printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n",
maxlen, mz->len);
rte_dump_physmem_layout();
SOCKET_ID_ANY, 0, 1024);
printf("check alignments and lengths\n");
+ if (memzone_aligned_32 == NULL) {
+ printf("Unable to reserve 64-byte aligned memzone!\n");
+ return -1;
+ }
if ((memzone_aligned_32->phys_addr & CACHE_LINE_MASK) != 0)
return -1;
if (((uintptr_t) memzone_aligned_32->addr & CACHE_LINE_MASK) != 0)
return -1;
if ((memzone_aligned_32->len & CACHE_LINE_MASK) != 0)
return -1;
+ if (memzone_aligned_128 == NULL) {
+ printf("Unable to reserve 128-byte aligned memzone!\n");
+ return -1;
+ }
if ((memzone_aligned_128->phys_addr & 127) != 0)
return -1;
if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
return -1;
if ((memzone_aligned_128->len & CACHE_LINE_MASK) != 0)
return -1;
+ if (memzone_aligned_256 == NULL) {
+ printf("Unable to reserve 256-byte aligned memzone!\n");
+ return -1;
+ }
if ((memzone_aligned_256->phys_addr & 255) != 0)
return -1;
if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
return -1;
if ((memzone_aligned_256->len & CACHE_LINE_MASK) != 0)
return -1;
+ if (memzone_aligned_512 == NULL) {
+ printf("Unable to reserve 512-byte aligned memzone!\n");
+ return -1;
+ }
if ((memzone_aligned_512->phys_addr & 511) != 0)
return -1;
if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
return -1;
if ((memzone_aligned_512->len & CACHE_LINE_MASK) != 0)
return -1;
+ if (memzone_aligned_1024 == NULL) {
+ printf("Unable to reserve 1024-byte aligned memzone!\n");
+ return -1;
+ }
if ((memzone_aligned_1024->phys_addr & 1023) != 0)
return -1;
if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
if ((memzone_aligned_1024->len & CACHE_LINE_MASK) != 0)
return -1;
-
/* check that zones don't overlap */
printf("check overlapping\n");
if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
return -1;
- printf("test reserving the largest size memzone possible\n");
- if (test_memzone_reserve_max() < 0)
- return -1;
-
printf("test memzone_reserve flags\n");
if (test_memzone_reserve_flags() < 0)
return -1;
if (test_memzone_invalid_alignment() < 0)
return -1;
+ printf("test reserving the largest size memzone possible\n");
+ if (test_memzone_reserve_max() < 0)
+ return -1;
+
printf("test reserving the largest size aligned memzone possible\n");
if (test_memzone_reserve_max_aligned() < 0)
return -1;
#include <stdarg.h>
#include <unistd.h>
#include <sys/wait.h>
+#include <libgen.h>
+#include <dirent.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_ring.h>
#include <rte_debug.h>
-#include <stdarg.h>
#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_hash.h>
#define launch_proc(ARGV) process_dup(ARGV, \
sizeof(ARGV)/(sizeof(ARGV[0])), __func__)
+static char*
+get_current_prefix(char * prefix, int size)
+{
+ char path[PATH_MAX] = {0};
+ char buf[PATH_MAX] = {0};
+
+ /* get file for config (fd is always 3) */
+ rte_snprintf(path, sizeof(path), "/proc/self/fd/%d", 3);
+
+ /* return NULL on error */
+	if (readlink(path, buf, sizeof(buf) - 1) == -1)
+		return NULL;
+
+ /* get the basename */
+ rte_snprintf(buf, sizeof(buf), "%s", basename(buf));
+
+ /* copy string all the way from second char up to start of _config */
+	rte_snprintf(prefix, size, "%.*s",
+			(int)(strnlen(buf, sizeof(buf)) - sizeof("_config")),
+			&buf[1]);
+
+ return prefix;
+}
+
/*
* This function is called in the primary i.e. main test, to spawn off secondary
* processes to run actual mp tests. Uses fork() and exec pair
int ret = 0;
char coremask[10];
+ char tmp[PATH_MAX] = {0};
+ char prefix[PATH_MAX] = {0};
+
+ get_current_prefix(tmp, sizeof(tmp));
+
+ rte_snprintf(prefix, sizeof(prefix), "--file-prefix=%s", tmp);
+
/* good case, using secondary */
const char *argv1[] = {
- prgname, "-c", coremask, "--proc-type=secondary"
+ prgname, "-c", coremask, "--proc-type=secondary",
+ prefix
};
/* good case, using auto */
const char *argv2[] = {
- prgname, "-c", coremask, "--proc-type=auto"
+ prgname, "-c", coremask, "--proc-type=auto",
+ prefix
};
/* bad case, using invalid type */
const char *argv3[] = {
- prgname, "-c", coremask, "--proc-type=ERROR"
+ prgname, "-c", coremask, "--proc-type=ERROR",
+ prefix
};
/* bad case, using invalid file prefix */
const char *argv4[] = {
printf("### Testing object creation - expect lots of mz reserve errors!\n");
rte_errno = 0;
- if (rte_memzone_reserve("test_mz", size, rte_socket_id(), flags) != NULL
- || rte_errno != E_RTE_SECONDARY){
+ if ((rte_memzone_reserve("test_mz", size, rte_socket_id(),
+ flags) == NULL) &&
+ (rte_memzone_lookup("test_mz") == NULL)) {
printf("Error: unexpected return value from rte_memzone_reserve\n");
return -1;
}
printf("# Checked rte_memzone_reserve() OK\n");
rte_errno = 0;
- if (rte_ring_create("test_rng", size, rte_socket_id(), flags) != NULL
- || rte_errno != E_RTE_SECONDARY){
+ if ((rte_ring_create(
+ "test_ring", size, rte_socket_id(), flags) == NULL) &&
+ (rte_ring_lookup("test_ring") == NULL)){
printf("Error: unexpected return value from rte_ring_create()\n");
return -1;
}
printf("# Checked rte_ring_create() OK\n");
-
rte_errno = 0;
- if (rte_mempool_create("test_mp", size, elt_size, cache_size,
- priv_data_size, NULL, NULL, NULL, NULL,
- rte_socket_id(), flags) != NULL
- || rte_errno != E_RTE_SECONDARY){
- printf("Error: unexpected return value from rte_ring_create()\n");
+ if ((rte_mempool_create("test_mp", size, elt_size, cache_size,
+ priv_data_size, NULL, NULL, NULL, NULL,
+ rte_socket_id(), flags) == NULL) &&
+ (rte_mempool_lookup("test_mp") == NULL)){
+ printf("Error: unexpected return value from rte_mempool_create()\n");
return -1;
}
printf("# Checked rte_mempool_create() OK\n");
const struct rte_hash_parameters hash_params = { .name = "test_mp_hash" };
rte_errno=0;
- if (rte_hash_create(&hash_params) != NULL
- || rte_errno != E_RTE_SECONDARY){
- printf("Error: unexpected return value from rte_ring_create()\n");
+ if ((rte_hash_create(&hash_params) != NULL) &&
+ (rte_hash_find_existing(hash_params.name) == NULL)){
+ printf("Error: unexpected return value from rte_hash_create()\n");
return -1;
}
printf("# Checked rte_hash_create() OK\n");
- const struct rte_fbk_hash_params fbk_params = { .name = "test_mp_hash" };
+ const struct rte_fbk_hash_params fbk_params = { .name = "test_fbk_mp_hash" };
rte_errno=0;
- if (rte_fbk_hash_create(&fbk_params) != NULL
- || rte_errno != E_RTE_SECONDARY){
- printf("Error: unexpected return value from rte_ring_create()\n");
+ if ((rte_fbk_hash_create(&fbk_params) != NULL) &&
+ (rte_fbk_hash_find_existing(fbk_params.name) == NULL)){
+ printf("Error: unexpected return value from rte_fbk_hash_create()\n");
return -1;
}
printf("# Checked rte_fbk_hash_create() OK\n");
rte_errno=0;
- if (rte_lpm_create("test_lpm", size, rte_socket_id(), RTE_LPM_HEAP) != NULL
- || rte_errno != E_RTE_SECONDARY){
- printf("Error: unexpected return value from rte_ring_create()\n");
+ if ((rte_lpm_create("test_lpm", size, rte_socket_id(), 0) != NULL) &&
+ (rte_lpm_find_existing("test_lpm") == NULL)){
+ printf("Error: unexpected return value from rte_lpm_create()\n");
return -1;
}
printf("# Checked rte_lpm_create() OK\n");