+ do_multi_copies(dev_id, 0, 0); /* enqueue and complete one batch at a time */
+ do_multi_copies(dev_id, 1, 0); /* enqueue 2 batches and then complete both */
+ do_multi_copies(dev_id, 0, 1); /* enqueue 1 batch, then complete in two halves */
+ return 0;
+}
+
+/* Test the ioat fill operation: for each test length, zero the destination
+ * buffer, enqueue a fill of a 64-bit pattern, wait for completion, and
+ * verify every byte matches the repeating pattern.
+ * Returns 0 on success, -1 on failure; the allocated mbuf is freed on all
+ * paths (the original leaked it on every error return and did not check
+ * the allocation result before dereferencing).
+ */
+static int
+test_enqueue_fill(int dev_id)
+{
+	/* lengths cover an exact pattern multiple (8), larger buffers, and
+	 * sizes that end mid-pattern (50, 100, 89)
+	 */
+	const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
+	struct rte_mbuf *dst = rte_pktmbuf_alloc(pool);
+	struct rte_mbuf *completed[2] = {0};
+	uint64_t pattern = 0xfedcba9876543210;
+	char *dst_data;
+	unsigned int i, j;
+
+	if (dst == NULL) {
+		PRINT_ERR("Failed to allocate mbuf\n");
+		return -1;
+	}
+	dst_data = rte_pktmbuf_mtod(dst, char *);
+
+	for (i = 0; i < RTE_DIM(lengths); i++) {
+		/* reset dst_data so stale bytes can't mask a short fill */
+		memset(dst_data, 0, lengths[i]);
+
+		/* perform the fill operation */
+		if (rte_ioat_enqueue_fill(dev_id, pattern,
+				dst->buf_iova + dst->data_off, lengths[i],
+				(uintptr_t)dst) != 1) {
+			PRINT_ERR("Error with rte_ioat_enqueue_fill\n");
+			goto err;
+		}
+
+		rte_ioat_perform_ops(dev_id);
+		usleep(100); /* give the hardware time to complete */
+
+		if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
+				(void *)&completed[1]) != 1) {
+			PRINT_ERR("Error with completed ops\n");
+			goto err;
+		}
+		/* check the result: byte j must equal byte (j % 8) of the
+		 * pattern as laid out in memory
+		 */
+		for (j = 0; j < lengths[i]; j++) {
+			char pat_byte = ((char *)&pattern)[j % 8];
+			if (dst_data[j] != pat_byte) {
+				PRINT_ERR("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
+						lengths[i], dst_data[j], pat_byte);
+				goto err;
+			}
+		}
+	}
+
+	rte_pktmbuf_free(dst);
+	return 0;
+
+err:
+	rte_pktmbuf_free(dst);
+	return -1;
+}
+
+static int
+test_burst_capacity(int dev_id)
+{
+#define BURST_SIZE 64
+ const unsigned int ring_space = rte_ioat_burst_capacity(dev_id);
+ struct rte_mbuf *src, *dst;
+ unsigned int length = 1024;
+ unsigned int i, j, iter;
+ unsigned int old_cap, cap;
+ uintptr_t completions[BURST_SIZE];
+
+ src = rte_pktmbuf_alloc(pool);
+ dst = rte_pktmbuf_alloc(pool);
+
+ old_cap = ring_space;
+ /* to test capacity, we enqueue elements and check capacity is reduced
+ * by one each time - rebaselining the expected value after each burst
+ * as the capacity is only for a burst. We enqueue multiple bursts to
+ * fill up half the ring, before emptying it again. We do this twice to
+ * ensure that we get to test scenarios where we get ring wrap-around
+ */
+ for (iter = 0; iter < 2; iter++) {
+ for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
+ cap = rte_ioat_burst_capacity(dev_id);
+ if (cap > old_cap) {
+ PRINT_ERR("Error, avail ring capacity has gone up, not down\n");
+ return -1;
+ }
+ old_cap = cap;
+
+ for (j = 0; j < BURST_SIZE; j++) {
+ if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
+ rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
+ PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
+ return -1;
+ }
+ if (cap - rte_ioat_burst_capacity(dev_id) != j + 1) {
+ PRINT_ERR("Error, ring capacity did not change as expected\n");
+ return -1;
+ }
+ }
+ rte_ioat_perform_ops(dev_id);
+ }
+ usleep(100);
+ for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
+ if (rte_ioat_completed_ops(dev_id, BURST_SIZE,
+ NULL, NULL,
+ completions, completions) != BURST_SIZE) {
+ PRINT_ERR("Error with completions\n");
+ return -1;
+ }
+ }
+ if (rte_ioat_burst_capacity(dev_id) != ring_space) {
+ PRINT_ERR("Error, ring capacity has not reset to original value\n");
+ return -1;
+ }
+ old_cap = ring_space;
+ }
+
+ rte_pktmbuf_free(src);
+ rte_pktmbuf_free(dst);