{
return test_completion_status(dev_id, vchan, false) /* without fences */
|| test_completion_status(dev_id, vchan, true); /* with fences */
+}
+
+/*
+ * Verify rte_dma_fill(): for each length in 'lengths', zero the mbuf data
+ * area, fill the first lengths[i] bytes with a 64-bit pattern, wait for the
+ * hardware to complete, then check that (a) every filled byte matches the
+ * pattern and (b) no byte past the requested length was written.
+ *
+ * Returns 0 on success; on any failure ERR_RETURN reports the error and
+ * returns from this function (macro defined elsewhere in this file --
+ * presumably returns non-zero, confirm against its definition).
+ *
+ * NOTE(review): 'dst' is not freed on the ERR_RETURN paths, so each failure
+ * leaks one mbuf from 'pool' -- likely acceptable in a test that aborts,
+ * but worth confirming.
+ */
+static int
+test_enqueue_fill(int16_t dev_id, uint16_t vchan)
+{
+ const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
+ struct rte_mbuf *dst;
+ char *dst_data;
+ uint64_t pattern = 0xfedcba9876543210;
+ unsigned int i, j;
+
+ dst = rte_pktmbuf_alloc(pool);
+ if (dst == NULL)
+ ERR_RETURN("Failed to allocate mbuf\n");
+ dst_data = rte_pktmbuf_mtod(dst, char *);
+
+ for (i = 0; i < RTE_DIM(lengths); i++) {
+ /* reset dst_data so stale bytes from a previous iteration cannot
+ * mask a short fill or hide an overrun in the checks below */
+ memset(dst_data, 0, rte_pktmbuf_data_len(dst));
+
+ /* perform the fill operation; SUBMIT flag pushes it to HW at once */
+ int id = rte_dma_fill(dev_id, vchan, pattern,
+ rte_pktmbuf_iova(dst), lengths[i], RTE_DMA_OP_FLAG_SUBMIT);
+ if (id < 0)
+ ERR_RETURN("Error with rte_dma_fill\n");
+ await_hw(dev_id, vchan);
+ if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 1)
+ ERR_RETURN("Error: fill operation failed (length: %u)\n", lengths[i]);
+ /* check the data from the fill operation is correct: j % 8 indexes
+ * the raw in-memory representation of 'pattern', so the comparison
+ * is done in host byte order -- matching how HW writes the pattern */
+ for (j = 0; j < lengths[i]; j++) {
+ char pat_byte = ((char *)&pattern)[j % 8];
+ if (dst_data[j] != pat_byte)
+ ERR_RETURN("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
+ lengths[i], dst_data[j], pat_byte);
+ }
+ /* check that the data after the fill operation was not written to
+ * (still zero from the memset above) */
+ for (; j < rte_pktmbuf_data_len(dst); j++)
+ if (dst_data[j] != 0)
+ ERR_RETURN("Error, fill operation wrote too far (lengths = %u): got (%x), not (%x)\n",
+ lengths[i], dst_data[j], 0);
+ }
+
+ rte_pktmbuf_free(dst);
+ return 0;
+}
+
+/*
+ * Verify rte_dma_burst_capacity(): reported capacity must drop by exactly
+ * one per enqueued copy within a burst, never rise across a submit, and
+ * return to the original 'ring_space' once all operations complete. The
+ * outer loop runs enough bursts to exercise both ring wrap-around and
+ * wrap-around of the 16-bit operation ids (see comment below).
+ *
+ * Returns 0 on success; ERR_RETURN reports and returns on any failure.
+ *
+ * NOTE(review): the two rte_pktmbuf_alloc() results are not checked for
+ * NULL before use, and src/dst are leaked on every ERR_RETURN path --
+ * confirm this is acceptable for the test harness.
+ * NOTE(review): if ring_space were 0 the iter-bound expression divides by
+ * zero; the caller appears to gate on a minimum capacity before invoking
+ * this test -- verify.
+ */
+static int
+test_burst_capacity(int16_t dev_id, uint16_t vchan)
+{
+#define CAP_TEST_BURST_SIZE 64
+ const int ring_space = rte_dma_burst_capacity(dev_id, vchan);
+ struct rte_mbuf *src, *dst;
+ int i, j, iter;
+ int cap, ret;
+ bool dma_err;
+
+ src = rte_pktmbuf_alloc(pool);
+ dst = rte_pktmbuf_alloc(pool);
+
+ /* to test capacity, we enqueue elements and check capacity is reduced
+ * by one each time - rebaselining the expected value after each burst
+ * as the capacity is only for a burst. We enqueue multiple bursts to
+ * fill up half the ring, before emptying it again. We do this multiple
+ * times to ensure that we get to test scenarios where we get ring
+ * wrap-around and wrap-around of the ids returned (at UINT16_MAX).
+ */
+ for (iter = 0; iter < 2 * (((int)UINT16_MAX + 1) / ring_space); iter++) {
+ /* enough bursts per iteration to fill roughly half the ring */
+ for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
+ /* snapshot capacity before the burst; each enqueue below
+ * must reduce the reported value by exactly one */
+ cap = rte_dma_burst_capacity(dev_id, vchan);
+
+ for (j = 0; j < CAP_TEST_BURST_SIZE; j++) {
+ ret = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src),
+ rte_pktmbuf_iova(dst), COPY_LEN, 0);
+ if (ret < 0)
+ ERR_RETURN("Error with rte_dmadev_copy\n");
+
+ if (rte_dma_burst_capacity(dev_id, vchan) != cap - (j + 1))
+ ERR_RETURN("Error, ring capacity did not change as expected\n");
+ }
+ if (rte_dma_submit(dev_id, vchan) < 0)
+ ERR_RETURN("Error, failed to submit burst\n");
+
+ /* after submit, capacity may be rebaselined but must never
+ * exceed the pre-burst snapshot */
+ if (cap < rte_dma_burst_capacity(dev_id, vchan))
+ ERR_RETURN("Error, avail ring capacity has gone up, not down\n");
+ }
+ await_hw(dev_id, vchan);
+
+ /* drain exactly the bursts enqueued above, one burst at a time */
+ for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
+ ret = rte_dma_completed(dev_id, vchan,
+ CAP_TEST_BURST_SIZE, NULL, &dma_err);
+ if (ret != CAP_TEST_BURST_SIZE || dma_err) {
+ enum rte_dma_status_code status;
+
+ rte_dma_completed_status(dev_id, vchan, 1, NULL, &status);
+ ERR_RETURN("Error with rte_dmadev_completed, %u [expected: %u], dma_err = %d, i = %u, iter = %u, status = %u\n",
+ ret, CAP_TEST_BURST_SIZE, dma_err, i, iter, status);
+ }
+ }
+ /* with the ring fully drained, capacity must reset to its
+ * original value */
+ cap = rte_dma_burst_capacity(dev_id, vchan);
+ if (cap != ring_space)
+ ERR_RETURN("Error, ring capacity has not reset to original value, got %u, expected %u\n",
+ cap, ring_space);
+ }
+
+ rte_pktmbuf_free(src);
+ rte_pktmbuf_free(dst);
+
+ return 0;
}
static int
.nb_desc = TEST_RINGSIZE,
};
const int vchan = 0;
+ int ret;
+
+ ret = rte_dma_info_get(dev_id, &info);
+ if (ret != 0)
+ ERR_RETURN("Error with rte_dma_info_get()\n");
printf("\n### Test dmadev instance %u [%s]\n",
- dev_id, rte_dma_devices[dev_id].data->dev_name);
+ dev_id, info.dev_name);
- rte_dma_info_get(dev_id, &info);
if (info.max_vchans < 1)
ERR_RETURN("Error, no channels available on device id %u\n", dev_id);
if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
ERR_RETURN("Error with queue configuration\n");
- rte_dma_info_get(dev_id, &info);
- if (info.nb_vchans != 1)
+ ret = rte_dma_info_get(dev_id, &info);
+ if (ret != 0 || info.nb_vchans != 1)
ERR_RETURN("Error, no configured queues reported on device id %u\n", dev_id);
if (rte_dma_start(dev_id) != 0)
if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
ERR_RETURN("Error with rte_dma_stats_get()\n");
+ if (rte_dma_burst_capacity(dev_id, vchan) < 32)
+ ERR_RETURN("Error: Device does not have sufficient burst capacity to run tests");
+
if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
ERR_RETURN("Error device stats are not all zero: completed = %"PRIu64", "
"submitted = %"PRIu64", errors = %"PRIu64"\n",
if (runtest("copy", test_enqueue_copies, 640, dev_id, vchan, CHECK_ERRS) < 0)
goto err;
+ /* run some burst capacity tests */
+ if (rte_dma_burst_capacity(dev_id, vchan) < 64)
+ printf("DMA Dev %u: insufficient burst capacity (64 required), skipping tests\n",
+ dev_id);
+ else if (runtest("burst capacity", test_burst_capacity, 1, dev_id, vchan, CHECK_ERRS) < 0)
+ goto err;
+
/* to test error handling we can provide null pointers for source or dest in copies. This
* requires VA mode in DPDK, since NULL(0) is a valid physical address.
* We also need hardware that can report errors back.
dev_id, vchan, !CHECK_ERRS) < 0)
goto err;
+ if ((info.dev_capa & RTE_DMA_CAPA_OPS_FILL) == 0)
+ printf("DMA Dev %u: No device fill support, skipping fill tests\n", dev_id);
+ else if (runtest("fill", test_enqueue_fill, 1, dev_id, vchan, CHECK_ERRS) < 0)
+ goto err;
+
rte_mempool_free(pool);
rte_dma_stop(dev_id);
rte_dma_stats_reset(dev_id, vchan);