diff --git a/drivers/raw/ioat/ioat_rawdev_test.c b/drivers/raw/ioat/ioat_rawdev_test.c
index a5064d739d..5e33669699 100644
--- a/drivers/raw/ioat/ioat_rawdev_test.c
+++ b/drivers/raw/ioat/ioat_rawdev_test.c
@@ -73,13 +73,15 @@ do_multi_copies(int dev_id, int split_batches, int split_completions)
 	if (split_completions) {
 		/* gather completions in two halves */
 		uint16_t half_len = RTE_DIM(srcs) / 2;
-		if (rte_ioat_completed_ops(dev_id, half_len, (void *)completed_src,
+		if (rte_ioat_completed_ops(dev_id, half_len, NULL, NULL,
+				(void *)completed_src,
 				(void *)completed_dst) != half_len) {
 			PRINT_ERR("Error with rte_ioat_completed_ops - first half request\n");
 			rte_rawdev_dump(dev_id, stdout);
 			return -1;
 		}
-		if (rte_ioat_completed_ops(dev_id, half_len, (void *)&completed_src[half_len],
+		if (rte_ioat_completed_ops(dev_id, half_len, NULL, NULL,
+				(void *)&completed_src[half_len],
 				(void *)&completed_dst[half_len]) != half_len) {
 			PRINT_ERR("Error with rte_ioat_completed_ops - second half request\n");
 			rte_rawdev_dump(dev_id, stdout);
@@ -87,7 +89,8 @@ do_multi_copies(int dev_id, int split_batches, int split_completions)
 		}
 	} else {
 		/* gather all completions in one go */
-		if (rte_ioat_completed_ops(dev_id, 64, (void *)completed_src,
+		if (rte_ioat_completed_ops(dev_id, RTE_DIM(completed_src), NULL, NULL,
+				(void *)completed_src,
 				(void *)completed_dst) != RTE_DIM(srcs)) {
 			PRINT_ERR("Error with rte_ioat_completed_ops\n");
 			rte_rawdev_dump(dev_id, stdout);
@@ -151,7 +154,7 @@ test_enqueue_copies(int dev_id)
 		rte_ioat_perform_ops(dev_id);
 		usleep(10);
 
-		if (rte_ioat_completed_ops(dev_id, 1, (void *)&completed[0],
+		if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
 				(void *)&completed[1]) != 1) {
 			PRINT_ERR("Error with rte_ioat_completed_ops\n");
 			return -1;
@@ -170,6 +173,13 @@ test_enqueue_copies(int dev_id)
 		}
 		rte_pktmbuf_free(src);
 		rte_pktmbuf_free(dst);
+
+		/* check ring is now empty */
+		if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
+				(void *)&completed[1]) != 0) {
+			PRINT_ERR("Error: got unexpected returned handles from rte_ioat_completed_ops\n");
+			return -1;
+		}
 	} while (0);
 
 	/* test doing a multiple single copies */
@@ -203,9 +213,11 @@ test_enqueue_copies(int dev_id)
 		}
 		usleep(10);
 
-		if (rte_ioat_completed_ops(dev_id, max_completions, (void *)&completed[0],
+		if (rte_ioat_completed_ops(dev_id, max_completions, NULL, NULL,
+				(void *)&completed[0],
 				(void *)&completed[max_completions]) != max_ops) {
 			PRINT_ERR("Error with rte_ioat_completed_ops\n");
+			rte_rawdev_dump(dev_id, stdout);
 			return -1;
 		}
 		if (completed[0] != src || completed[max_completions] != dst) {
@@ -255,7 +267,7 @@ test_enqueue_fill(int dev_id)
 	rte_ioat_perform_ops(dev_id);
 	usleep(100);
 
-	if (rte_ioat_completed_ops(dev_id, 1, (void *)&completed[0],
+	if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
 			(void *)&completed[1]) != 1) {
 		PRINT_ERR("Error with completed ops\n");
 		return -1;
@@ -265,8 +277,7 @@ test_enqueue_fill(int dev_id)
 		char pat_byte = ((char *)&pattern)[j % 8];
 		if (dst_data[j] != pat_byte) {
 			PRINT_ERR("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
-					lengths[i], dst_data[j],
-					pat_byte);
+					lengths[i], dst_data[j], pat_byte);
 			return -1;
 		}
 	}
@@ -276,10 +287,313 @@ test_enqueue_fill(int dev_id)
 	return 0;
 }
 
+static int
+test_burst_capacity(int dev_id)
+{
+#define BURST_SIZE 64
+	const unsigned int ring_space = rte_ioat_burst_capacity(dev_id);
+	struct rte_mbuf *src, *dst;
+	unsigned int length = 1024;
+	unsigned int i, j, iter;
+	unsigned int old_cap, cap;
+	uintptr_t completions[BURST_SIZE];
+
+	src = rte_pktmbuf_alloc(pool);
+	dst = rte_pktmbuf_alloc(pool);
+
+	old_cap = ring_space;
+	/* to test capacity, we enqueue elements and check capacity is reduced
+	 * by one each time - rebaselining the expected value after each burst
+	 * as the capacity is only for a burst. We enqueue multiple bursts to
+	 * fill up half the ring, before emptying it again. We do this twice to
+	 * ensure that we get to test scenarios where we get ring wrap-around
+	 */
+	for (iter = 0; iter < 2; iter++) {
+		for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
+			cap = rte_ioat_burst_capacity(dev_id);
+			if (cap > old_cap) {
+				PRINT_ERR("Error, avail ring capacity has gone up, not down\n");
+				return -1;
+			}
+			old_cap = cap;
+
+			for (j = 0; j < BURST_SIZE; j++) {
+				if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
+						rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
+					PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
+					return -1;
+				}
+				if (cap - rte_ioat_burst_capacity(dev_id) != j + 1) {
+					PRINT_ERR("Error, ring capacity did not change as expected\n");
+					return -1;
+				}
+			}
+			rte_ioat_perform_ops(dev_id);
+		}
+		usleep(100);
+		for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
+			if (rte_ioat_completed_ops(dev_id, BURST_SIZE,
+					NULL, NULL,
+					completions, completions) != BURST_SIZE) {
+				PRINT_ERR("Error with completions\n");
+				return -1;
+			}
+		}
+		if (rte_ioat_burst_capacity(dev_id) != ring_space) {
+			PRINT_ERR("Error, ring capacity has not reset to original value\n");
+			return -1;
+		}
+		old_cap = ring_space;
+	}
+
+	rte_pktmbuf_free(src);
+	rte_pktmbuf_free(dst);
+
+	return 0;
+}
+
+static int
+test_completion_status(int dev_id)
+{
+#define COMP_BURST_SZ 16
+	const unsigned int fail_copy[] = {0, 7, 15};
+	struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
+	struct rte_mbuf *completed_src[COMP_BURST_SZ * 2];
+	struct rte_mbuf *completed_dst[COMP_BURST_SZ * 2];
+	unsigned int length = 1024;
+	unsigned int i;
+	uint8_t not_ok = 0;
+
+	/* Test single full batch statuses */
+	for (i = 0; i < RTE_DIM(fail_copy); i++) {
+		uint32_t status[COMP_BURST_SZ] = {0};
+		unsigned int j;
+
+		for (j = 0; j < COMP_BURST_SZ; j++) {
+			srcs[j] = rte_pktmbuf_alloc(pool);
+			dsts[j] = rte_pktmbuf_alloc(pool);
+
+			if (rte_ioat_enqueue_copy(dev_id,
+					(j == fail_copy[i] ? (phys_addr_t)NULL :
+						(srcs[j]->buf_iova + srcs[j]->data_off)),
+					dsts[j]->buf_iova + dsts[j]->data_off,
+					length,
+					(uintptr_t)srcs[j],
+					(uintptr_t)dsts[j]) != 1) {
+				PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n", j);
+				return -1;
+			}
+		}
+		rte_ioat_perform_ops(dev_id);
+		usleep(100);
+
+		if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ, status, &not_ok,
+				(void *)completed_src, (void *)completed_dst) != COMP_BURST_SZ) {
+			PRINT_ERR("Error with rte_ioat_completed_ops\n");
+			rte_rawdev_dump(dev_id, stdout);
+			return -1;
+		}
+		if (not_ok != 1 || status[fail_copy[i]] == RTE_IOAT_OP_SUCCESS) {
+			unsigned int j;
+			PRINT_ERR("Error, missing expected failed copy, %u\n", fail_copy[i]);
+			for (j = 0; j < COMP_BURST_SZ; j++)
+				printf("%u ", status[j]);
+			printf("<-- Statuses\n");
+			return -1;
+		}
+		for (j = 0; j < COMP_BURST_SZ; j++) {
+			rte_pktmbuf_free(completed_src[j]);
+			rte_pktmbuf_free(completed_dst[j]);
+		}
+	}
+
+	/* Test gathering status for two batches at once */
+	for (i = 0; i < RTE_DIM(fail_copy); i++) {
+		uint32_t status[COMP_BURST_SZ] = {0};
+		unsigned int batch, j;
+		unsigned int expected_failures = 0;
+
+		for (batch = 0; batch < 2; batch++) {
+			for (j = 0; j < COMP_BURST_SZ/2; j++) {
+				srcs[j] = rte_pktmbuf_alloc(pool);
+				dsts[j] = rte_pktmbuf_alloc(pool);
+
+				if (j == fail_copy[i])
+					expected_failures++;
+				if (rte_ioat_enqueue_copy(dev_id,
+						(j == fail_copy[i] ? (phys_addr_t)NULL :
+							(srcs[j]->buf_iova + srcs[j]->data_off)),
+						dsts[j]->buf_iova + dsts[j]->data_off,
+						length,
+						(uintptr_t)srcs[j],
+						(uintptr_t)dsts[j]) != 1) {
+					PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n",
+							j);
+					return -1;
+				}
+			}
+			rte_ioat_perform_ops(dev_id);
+		}
+		usleep(100);
+
+		if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ, status, &not_ok,
+				(void *)completed_src, (void *)completed_dst) != COMP_BURST_SZ) {
+			PRINT_ERR("Error with rte_ioat_completed_ops\n");
+			rte_rawdev_dump(dev_id, stdout);
+			return -1;
+		}
+		if (not_ok != expected_failures) {
+			unsigned int j;
+			PRINT_ERR("Error, missing expected failed copy, got %u, not %u\n",
+					not_ok, expected_failures);
+			for (j = 0; j < COMP_BURST_SZ; j++)
+				printf("%u ", status[j]);
+			printf("<-- Statuses\n");
+			return -1;
+		}
+		for (j = 0; j < COMP_BURST_SZ; j++) {
+			rte_pktmbuf_free(completed_src[j]);
+			rte_pktmbuf_free(completed_dst[j]);
+		}
+	}
+
+	/* Test gathering status for half batch at a time */
+	for (i = 0; i < RTE_DIM(fail_copy); i++) {
+		uint32_t status[COMP_BURST_SZ] = {0};
+		unsigned int j;
+
+		for (j = 0; j < COMP_BURST_SZ; j++) {
+			srcs[j] = rte_pktmbuf_alloc(pool);
+			dsts[j] = rte_pktmbuf_alloc(pool);
+
+			if (rte_ioat_enqueue_copy(dev_id,
+					(j == fail_copy[i] ? (phys_addr_t)NULL :
+						(srcs[j]->buf_iova + srcs[j]->data_off)),
+					dsts[j]->buf_iova + dsts[j]->data_off,
+					length,
+					(uintptr_t)srcs[j],
+					(uintptr_t)dsts[j]) != 1) {
+				PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n", j);
+				return -1;
+			}
+		}
+		rte_ioat_perform_ops(dev_id);
+		usleep(100);
+
+		if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ / 2, status, &not_ok,
+				(void *)completed_src,
+				(void *)completed_dst) != (COMP_BURST_SZ / 2)) {
+			PRINT_ERR("Error with rte_ioat_completed_ops\n");
+			rte_rawdev_dump(dev_id, stdout);
+			return -1;
+		}
+		if (fail_copy[i] < COMP_BURST_SZ / 2 &&
+				(not_ok != 1 || status[fail_copy[i]] == RTE_IOAT_OP_SUCCESS)) {
+			PRINT_ERR("Missing expected failure in first half-batch\n");
+			rte_rawdev_dump(dev_id, stdout);
+			return -1;
+		}
+		if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ / 2, status, &not_ok,
+				(void *)&completed_src[COMP_BURST_SZ / 2],
+				(void *)&completed_dst[COMP_BURST_SZ / 2]) != (COMP_BURST_SZ / 2)) {
+			PRINT_ERR("Error with rte_ioat_completed_ops\n");
+			rte_rawdev_dump(dev_id, stdout);
+			return -1;
+		}
+		if (fail_copy[i] >= COMP_BURST_SZ / 2 && (not_ok != 1 ||
+				status[fail_copy[i] - (COMP_BURST_SZ / 2)]
+					== RTE_IOAT_OP_SUCCESS)) {
+			PRINT_ERR("Missing expected failure in second half-batch\n");
+			rte_rawdev_dump(dev_id, stdout);
+			return -1;
+		}
+
+		for (j = 0; j < COMP_BURST_SZ; j++) {
+			rte_pktmbuf_free(completed_src[j]);
+			rte_pktmbuf_free(completed_dst[j]);
+		}
+	}
+
+	/* Test gathering statuses with fence */
+	for (i = 1; i < RTE_DIM(fail_copy); i++) {
+		uint32_t status[COMP_BURST_SZ * 2] = {0};
+		unsigned int j;
+		uint16_t count;
+
+		for (j = 0; j < COMP_BURST_SZ; j++) {
+			srcs[j] = rte_pktmbuf_alloc(pool);
+			dsts[j] = rte_pktmbuf_alloc(pool);
+
+			/* always fail the first copy */
+			if (rte_ioat_enqueue_copy(dev_id,
+					(j == 0 ? (phys_addr_t)NULL :
+						(srcs[j]->buf_iova + srcs[j]->data_off)),
+					dsts[j]->buf_iova + dsts[j]->data_off,
+					length,
+					(uintptr_t)srcs[j],
+					(uintptr_t)dsts[j]) != 1) {
+				PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n", j);
+				return -1;
+			}
+			/* put in a fence which will stop any further transactions
+			 * because we had a previous failure.
+			 */
+			if (j == fail_copy[i])
+				rte_ioat_fence(dev_id);
+		}
+		rte_ioat_perform_ops(dev_id);
+		usleep(100);
+
+		count = rte_ioat_completed_ops(dev_id, COMP_BURST_SZ * 2, status, &not_ok,
+				(void *)completed_src, (void *)completed_dst);
+		if (count != COMP_BURST_SZ) {
+			PRINT_ERR("Error with rte_ioat_completed_ops, got %u not %u\n",
+					count, COMP_BURST_SZ);
+			for (j = 0; j < count; j++)
+				printf("%u ", status[j]);
+			printf("<-- Statuses\n");
+			return -1;
+		}
+		if (not_ok != COMP_BURST_SZ - fail_copy[i]) {
+			PRINT_ERR("Unexpected failed copy count, got %u, expected %u\n",
+					not_ok, COMP_BURST_SZ - fail_copy[i]);
+			for (j = 0; j < COMP_BURST_SZ; j++)
+				printf("%u ", status[j]);
+			printf("<-- Statuses\n");
+			return -1;
+		}
+		if (status[0] == RTE_IOAT_OP_SUCCESS || status[0] == RTE_IOAT_OP_SKIPPED) {
+			PRINT_ERR("Error, op 0 unexpectedly did not fail.\n");
+			return -1;
+		}
+		for (j = 1; j <= fail_copy[i]; j++) {
+			if (status[j] != RTE_IOAT_OP_SUCCESS) {
+				PRINT_ERR("Error, op %u unexpectedly failed\n", j);
+				return -1;
+			}
+		}
+		for (j = fail_copy[i] + 1; j < COMP_BURST_SZ; j++) {
+			if (status[j] != RTE_IOAT_OP_SKIPPED) {
+				PRINT_ERR("Error, all descriptors after fence should be invalid\n");
+				return -1;
+			}
+		}
+		for (j = 0; j < COMP_BURST_SZ; j++) {
+			rte_pktmbuf_free(completed_src[j]);
+			rte_pktmbuf_free(completed_dst[j]);
+		}
+	}
+
+	return 0;
+}
+
 int
 ioat_rawdev_test(uint16_t dev_id)
 {
 #define IOAT_TEST_RINGSIZE 512
+	const struct rte_idxd_rawdev *idxd =
+			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
+	const enum rte_ioat_dev_type ioat_type = idxd->type;
 	struct rte_ioat_rawdev_config p = { .ring_size = -1 };
 	struct rte_rawdev_info info = { .dev_private = &p };
 	struct rte_rawdev_xstats_name *snames = NULL;
@@ -320,7 +634,7 @@ ioat_rawdev_test(uint16_t dev_id)
 	}
 
 	pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
-			256, /* n == num elements */
+			p.ring_size * 2, /* n == num elements */
 			32,  /* cache size */
 			0,   /* priv size */
 			2048, /* data room size */
@@ -384,6 +698,19 @@ ioat_rawdev_test(uint16_t dev_id)
 	}
 	printf("\n");
 
+	printf("Running Burst Capacity Test\n");
+	if (test_burst_capacity(dev_id) != 0)
+		goto err;
+
+	/* only DSA devices report address errors, and we can only use null pointers
+	 * to generate those errors when DPDK is in VA mode.
+	 */
+	if (rte_eal_iova_mode() == RTE_IOVA_VA && ioat_type == RTE_IDXD_DEV) {
+		printf("Running Completions Status Test\n");
+		if (test_completion_status(dev_id) != 0)
+			goto err;
+	}
+
 	rte_rawdev_stop(dev_id);
 	if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
 		PRINT_ERR("Error resetting xstat values\n");
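Note on the API usage exercised above (not part of the patch): the reworked
rte_ioat_completed_ops() takes two extra arguments ahead of the handle arrays,
an optional per-op status array and an unsuccessful-op count, and the tests
pass NULL for both when only the handles matter. Below is a minimal sketch of
how a caller might drain completions with the extended signature; the helper
name drain_completions and the MAX_BURST constant are illustrative only.

#include <stdio.h>
#include <rte_ioat_rawdev.h>

#define MAX_BURST 32	/* illustrative burst size, not from the patch */

static int
drain_completions(int dev_id)
{
	uint32_t status[MAX_BURST];
	uintptr_t src_hdls[MAX_BURST], dst_hdls[MAX_BURST];
	uint8_t num_unsuccessful = 0;
	int completed, i;

	/* status[] receives one entry per returned op; num_unsuccessful
	 * counts entries that are not RTE_IOAT_OP_SUCCESS, e.g. failed or
	 * skipped-after-fence descriptors as checked in the tests above.
	 */
	completed = rte_ioat_completed_ops(dev_id, MAX_BURST,
			status, &num_unsuccessful, src_hdls, dst_hdls);
	if (completed < 0)
		return -1;

	if (num_unsuccessful != 0)
		for (i = 0; i < completed; i++)
			if (status[i] != RTE_IOAT_OP_SUCCESS)
				printf("op %d: status %u\n", i, status[i]);

	/* src_hdls/dst_hdls now hold the handles given at enqueue time,
	 * e.g. mbuf pointers that the caller can free.
	 */
	return completed;
}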