/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_per_lcore.h>
#include <rte_rawdev.h>

#include "otx2_dpi_rawdev.h"

static struct dpi_cring_data_s cring;
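
/*
 * Fill @addr with a simple test pattern: @val when non-zero, otherwise the
 * byte index. Any non-constant pattern works here; it only has to let
 * validate_buffer() detect corruption after the DMA transfer.
 */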
static void
buffer_fill(uint8_t *addr, int len, uint8_t val)
{
	int j;

	memset(addr, 0, len);
	for (j = 0; j < len; j++)
		addr[j] = val ? val : (uint8_t)j;
}
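
/* Byte-wise compare of source and destination; log the first mismatch. */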
static int
validate_buffer(uint8_t *saddr, uint8_t *daddr, int len)
{
	int j;

	for (j = 0; j < len; j++) {
		if (*(saddr + j) != *(daddr + j)) {
			otx2_dpi_dbg("FAIL: Data Integrity failed");
			otx2_dpi_dbg("index: %d, Expected: 0x%x, Actual: 0x%x",
				     j, *(saddr + j), *(daddr + j));
			return -1;
		}
	}

	return 0;
}
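
/*
 * Run one INTERNAL_ONLY (memory-to-memory) DMA transfer on @dma_port: copy
 * @buf_size bytes from a pattern-filled source buffer into a zeroed
 * destination buffer, poll for completion and verify data integrity.
 */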
static int
dma_test_internal(int dma_port, int buf_size)
{
	struct dpi_dma_req_compl_s *comp_data;
	struct dpi_dma_queue_ctx_s ctx = {0};
	struct rte_rawdev_buf buf = {0};
	struct rte_rawdev_buf *d_buf[1];
	struct rte_rawdev_buf *bufp[1];
	struct dpi_dma_buf_ptr_s cmd;
	union dpi_dma_ptr_u rptr = { {0} };
	union dpi_dma_ptr_u wptr = { {0} };
	uint8_t *fptr, *lptr;
	int ret;

	fptr = (uint8_t *)rte_malloc("dummy", buf_size, 128);
	lptr = (uint8_t *)rte_malloc("dummy", buf_size, 128);
	comp_data = rte_malloc("dummy", buf_size, 128);
	if (fptr == NULL || lptr == NULL || comp_data == NULL) {
		otx2_dpi_dbg("Unable to allocate internal memory");
		rte_free(fptr);
		rte_free(lptr);
		rte_free(comp_data);
		return -ENOMEM;
	}

	/* Source gets the test pattern; destination and completion are zeroed */
	buffer_fill(fptr, buf_size, 0);
	memset(&cmd, 0, sizeof(struct dpi_dma_buf_ptr_s));
	memset(lptr, 0, buf_size);
	memset(comp_data, 0, buf_size);
	rptr.s.ptr = (uint64_t)fptr;
	rptr.s.length = buf_size;
	wptr.s.ptr = (uint64_t)lptr;
	wptr.s.length = buf_size;
	cmd.rptr[0] = &rptr;
	cmd.wptr[0] = &wptr;
	cmd.rptr_cnt = 1;
	cmd.wptr_cnt = 1;
	cmd.comp_ptr = comp_data;
	buf.buf_addr = (void *)&cmd;
	bufp[0] = &buf;

	ctx.xtype = DPI_XTYPE_INTERNAL_ONLY;
	ctx.c_ring = &cring;

	ret = rte_rawdev_enqueue_buffers(dma_port,
					 (struct rte_rawdev_buf **)bufp, 1,
					 &ctx);
	if (ret < 0) {
		otx2_dpi_dbg("Enqueue request failed");
		rte_free(fptr);
		rte_free(lptr);
		rte_free(comp_data);
		return ret;
	}

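	/*
	 * Completion is reported asynchronously: the engine updates the
	 * completion data referenced by cmd.comp_ptr, and the PMD surfaces
	 * that as a non-zero dequeue count. The one-second poll interval
	 * below is an arbitrary choice for this test.
	 */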
	/* Wait and dequeue completion */
	do {
		sleep(1);
		ret = rte_rawdev_dequeue_buffers(dma_port, &d_buf[0], 1, &ctx);
		if (ret)
			break;
		otx2_dpi_dbg("Dequeue request not completed");
	} while (1);

	ret = validate_buffer(fptr, lptr, buf_size);
	if (ret)
		otx2_dpi_dbg("DMA transfer failed");
	else
		otx2_dpi_dbg("Internal Only DMA transfer successfully completed");

	rte_free(fptr);
	rte_free(lptr);
	rte_free(comp_data);

	return ret;
}
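
/*
 * Create the chunk mempool handed to the DPI rawdev at configure time; the
 * PMD allocates its command chunks from this pool. 1024 elements of 1024
 * bytes each; returns NULL on any failure.
 */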
static void *
dpi_create_mempool(void)
{
	void *chunk_pool = NULL;
	char pool_name[25];
	int ret;

	snprintf(pool_name, sizeof(pool_name), "dpi_chunk_pool");

	chunk_pool = (void *)rte_mempool_create_empty(pool_name, 1024, 1024,
						      0, 0, rte_socket_id(), 0);
	if (chunk_pool == NULL) {
		otx2_dpi_dbg("Unable to create memory pool.");
		return NULL;
	}

	ret = rte_mempool_set_ops_byname(chunk_pool,
					 rte_mbuf_platform_mempool_ops(), NULL);
	if (ret != 0) {
		otx2_dpi_dbg("Unable to set pool ops");
		rte_mempool_free(chunk_pool);
		return NULL;
	}

	ret = rte_mempool_populate_default(chunk_pool);
	if (ret < 0) {
		otx2_dpi_dbg("Unable to populate pool");
		rte_mempool_free(chunk_pool);
		return NULL;
	}

	return chunk_pool;
}
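
/*
 * Selftest entry point: configure one DPI VF (the device name "DPI:5:00.1"
 * is hard-coded here), set up the shared completion ring and run a single
 * 1024-byte internal-only DMA transfer.
 */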
int
test_otx2_dma_rawdev(uint16_t val)
{
	struct rte_rawdev_info rdev_info = {0};
	struct dpi_rawdev_conf_s conf = {0};
	int ret, i, size = 1024;
	int nb_ports;

	RTE_SET_USED(val);
	nb_ports = rte_rawdev_count();
	if (nb_ports == 0) {
		otx2_dpi_dbg("No Rawdev ports - bye");
		return -ENODEV;
	}

	i = rte_rawdev_get_dev_id("DPI:5:00.1");
	/* Configure rawdev port */
	conf.chunk_pool = dpi_create_mempool();
	if (conf.chunk_pool == NULL)
		return -ENOMEM;
	rdev_info.dev_private = &conf;
	ret = rte_rawdev_configure(i, (rte_rawdev_obj_t)&rdev_info);
	if (ret) {
		otx2_dpi_dbg("Unable to configure DPIVF %d", i);
		return -ENODEV;
	}
	otx2_dpi_dbg("rawdev %d configured successfully", i);

	/* Each stream allocates its own completion ring data and stores it in
	 * the application context. Each stream must use the same application
	 * context for its enqueues and dequeues.
	 */
	cring.compl_data = rte_malloc("dummy", sizeof(void *) * 1024, 128);
	if (!cring.compl_data) {
		otx2_dpi_dbg("Completion allocation failed");
		return -ENOMEM;
	}

	cring.max_cnt = 1024;
	cring.head = 0;
	cring.tail = 0;

	ret = dma_test_internal(i, size);
	if (ret)
		otx2_dpi_dbg("DMA transfer failed for queue %d", i);

	if (rte_rawdev_close(i))
		otx2_dpi_dbg("Dev close failed for port %d", i);

	rte_free(cring.compl_data);
	rte_mempool_free(conf.chunk_pool);

	return ret;
}