/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */
#ifndef _RTE_IDXD_RAWDEV_FNS_H_
#define _RTE_IDXD_RAWDEV_FNS_H_

/**
 * @file
 * This header file contains the implementation of the various ioat
 * rawdev functions for DSA hardware. The API specification and key
 * public structures are defined in "rte_ioat_rawdev.h".
 *
 * This file should not be included directly, but instead applications should
 * include "rte_ioat_rawdev.h", which then includes this file - and the
 * IOAT/CBDMA equivalent header - in turn.
 */

#include <stdint.h>
#include <rte_errno.h>
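/*
 * Illustrative usage sketch: the public API declared in "rte_ioat_rawdev.h"
 * dispatches to the __idxd_* helpers below when the underlying device is a
 * DSA instance. Assuming a rawdev "dev_id" that has already been configured
 * and started, a typical data path looks roughly like this (src_mbuf,
 * dst_mbuf and BURST are placeholders):
 *
 *	rte_ioat_enqueue_copy(dev_id, src_iova, dst_iova, len,
 *			(uintptr_t)src_mbuf, (uintptr_t)dst_mbuf);
 *	rte_ioat_perform_ops(dev_id);	// submit the staged batch to hardware
 *	...
 *	n = rte_ioat_completed_ops(dev_id, BURST, src_hdls, dst_hdls);
 */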
/*
 * Defines used in the data path for interacting with IDXD hardware.
 */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
	idxd_op_nop = 0,
	idxd_op_batch,
	idxd_op_drain,
	idxd_op_memmove,
	idxd_op_fill
};

#define IDXD_FLAG_FENCE                 (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)

#define IOAT_COMP_UPDATE_SHIFT	3
#define IOAT_CMD_OP_SHIFT 24
enum rte_ioat_ops {
	ioat_op_copy = 0,	/* Standard DMA Operation */
	ioat_op_fill		/* Block Fill */
};
/**
 * Hardware descriptor used by DSA hardware, for both bursts and
 * for individual operations.
 */
struct rte_idxd_hw_desc {
	uint32_t pasid;
	uint32_t op_flags;
	rte_iova_t completion;

	RTE_STD_C11
	union {
		rte_iova_t src;		/* source address for copy ops etc. */
		rte_iova_t desc_addr;	/* descriptor pointer for batch */
	};
	rte_iova_t dst;

	uint32_t size;		/* length of data for op, or batch size */

	uint16_t intr_handle;	/* completion interrupt handle */

	/* remaining 26 bytes are reserved */
	uint16_t __reserved[13];
} __rte_aligned(64);
/**
 * Completion record structure written back by DSA
 */
struct rte_idxd_completion {
	uint8_t status;
	uint8_t result;
	/* 16-bits pad here */
	uint32_t completed_size; /* data length, or descriptors for batch */

	rte_iova_t fault_address;
	uint32_t invalid_flags;
} __rte_aligned(32);
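/*
 * Note: the data path below points each descriptor's completion address back
 * at the descriptor's own 64-byte slot in desc_ring (see __idxd_write_desc()
 * and __idxd_perform_ops()), so on completion the hardware overwrites the
 * descriptor with this record and __idxd_completed_ops() polls the slot's
 * status byte directly.
 */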
/**
 * structure used to save the "handles" provided by the user to be
 * returned to the user on job completion.
 */
struct rte_idxd_user_hdl {
	uint64_t src;
	uint64_t dst;
};
/**
 * @internal
 * Structure representing an IDXD device instance
 */
struct rte_idxd_rawdev {
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	void *portal; /* address to write the batch descriptor */

	struct rte_ioat_rawdev_config cfg;
	rte_iova_t desc_iova; /* base address of desc ring, needed for completions */

	/* counters to track the batches */
	unsigned short max_batches;
	unsigned short batch_idx_read;
	unsigned short batch_idx_write;
	unsigned short *batch_idx_ring; /* store where each batch ends */

	/* track descriptors and handles */
	unsigned short desc_ring_mask;
	unsigned short hdls_avail; /* handles for ops completed */
	unsigned short hdls_read; /* the read pointer for hdls/desc rings */
	unsigned short batch_start; /* start+size == write pointer for hdls/desc */
	unsigned short batch_size;

	struct rte_idxd_hw_desc *desc_ring;
	struct rte_idxd_user_hdl *hdl_ring;
};
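/*
 * Ring bookkeeping: desc_ring and hdl_ring are parallel rings of
 * desc_ring_mask + 1 entries. batch_start/batch_size delimit the batch
 * currently being built; batch_idx_ring records, for each batch handed to
 * hardware, the index of its final (completion-carrying) descriptor, with
 * batch_idx_read/batch_idx_write as its consumer/producer indexes.
 * hdls_read..hdls_avail is the span of user handles ready to be returned.
 */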
static __rte_always_inline rte_iova_t
__desc_idx_to_iova(struct rte_idxd_rawdev *idxd, uint16_t n)
{
	return idxd->desc_iova + (n * sizeof(struct rte_idxd_hw_desc));
}
static __rte_always_inline int
__idxd_write_desc(int dev_id,
		const uint32_t op_flags,
		const rte_iova_t src,
		const rte_iova_t dst,
		const uint32_t size,
		const struct rte_idxd_user_hdl *hdl)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	uint16_t write_idx = idxd->batch_start + idxd->batch_size;

	/* first check batch ring space then desc ring space */
	if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
			idxd->batch_idx_write + 1 == idxd->batch_idx_read)
		goto failed;
	if (((write_idx + 1) & idxd->desc_ring_mask) == idxd->hdls_read)
		goto failed;

	/* write desc and handle. Note, descriptors don't wrap */
	idxd->desc_ring[write_idx].pasid = 0;
	idxd->desc_ring[write_idx].op_flags = op_flags | IDXD_FLAG_COMPLETION_ADDR_VALID;
	idxd->desc_ring[write_idx].completion = __desc_idx_to_iova(idxd, write_idx);
	idxd->desc_ring[write_idx].src = src;
	idxd->desc_ring[write_idx].dst = dst;
	idxd->desc_ring[write_idx].size = size;

	idxd->hdl_ring[write_idx & idxd->desc_ring_mask] = *hdl;
	idxd->batch_size++;

	idxd->xstats.enqueued++;

	rte_prefetch0_write(&idxd->desc_ring[write_idx + 1]);

	return 1;

failed:
	idxd->xstats.enqueue_failed++;
	rte_errno = ENOSPC;
	return 0;
}
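/*
 * Note: __idxd_write_desc() and the enqueue wrappers below return 1 when the
 * job has been staged in the current batch, or 0 with rte_errno set to ENOSPC
 * when either the batch-index ring or the descriptor ring is full; nothing
 * reaches hardware until __idxd_perform_ops() is called.
 */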
static __rte_always_inline int
__idxd_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst,
		unsigned int length, uintptr_t dst_hdl)
{
	const struct rte_idxd_user_hdl hdl = {
			.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id,
			(idxd_op_fill << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL,
			pattern, dst, length, &hdl);
}
static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	const struct rte_idxd_user_hdl hdl = {
			.src = src_hdl,
			.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id,
			(idxd_op_memmove << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL,
			src, dst, length, &hdl);
}
static __rte_always_inline int
__idxd_fence(int dev_id)
{
	static const struct rte_idxd_user_hdl null_hdl;
	/* only op field needs filling - zero src, dst and length */
	return __idxd_write_desc(dev_id, IDXD_FLAG_FENCE, 0, 0, 0, &null_hdl);
}
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const struct rte_idxd_hw_desc *src)
{
	asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
			:
			: "a" (dst), "d" (src)
			: "memory");
}
static __rte_always_inline int
__idxd_perform_ops(int dev_id)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	/* write completion to last desc in the batch */
	uint16_t comp_idx = idxd->batch_start + idxd->batch_size - 1;
	if (comp_idx > idxd->desc_ring_mask) {
		comp_idx &= idxd->desc_ring_mask;
		*((uint64_t *)&idxd->desc_ring[comp_idx]) = 0; /* zero start of desc */
	}

	if (idxd->batch_size == 0)
		return 0;

	_mm_sfence(); /* fence before writing desc to device */
	if (idxd->batch_size > 1) {
		struct rte_idxd_hw_desc batch_desc = {
				.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |
					IDXD_FLAG_COMPLETION_ADDR_VALID |
					IDXD_FLAG_REQUEST_COMPLETION,
				.desc_addr = __desc_idx_to_iova(idxd, idxd->batch_start),
				.completion = __desc_idx_to_iova(idxd, comp_idx),
				.size = idxd->batch_size,
		};

		__idxd_movdir64b(idxd->portal, &batch_desc);
	} else {
		/* special case batch size of 1, as not allowed by HW */
		/* comp_idx == batch_start */
		struct rte_idxd_hw_desc *desc = &idxd->desc_ring[comp_idx];
		desc->op_flags |= IDXD_FLAG_COMPLETION_ADDR_VALID |
				IDXD_FLAG_REQUEST_COMPLETION;
		desc->completion = __desc_idx_to_iova(idxd, comp_idx);

		__idxd_movdir64b(idxd->portal, desc);
	}

	idxd->xstats.started += idxd->batch_size;

	idxd->batch_start += idxd->batch_size;
	idxd->batch_start &= idxd->desc_ring_mask;
	idxd->batch_size = 0;

	idxd->batch_idx_ring[idxd->batch_idx_write++] = comp_idx;
	if (idxd->batch_idx_write > idxd->max_batches)
		idxd->batch_idx_write = 0;

	return 0;
}
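/*
 * Submission summary: a multi-element batch is kicked off with a single
 * on-stack batch descriptor whose desc_addr points at the first staged
 * descriptor and whose size is the element count; a one-element "batch" is
 * submitted directly, since the hardware does not accept batches of one. In
 * both cases the completion record lands in the slot recorded in
 * batch_idx_ring, which __idxd_completed_ops() later polls.
 */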
static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short n, h_idx;

	while (idxd->batch_idx_read != idxd->batch_idx_write) {
		uint16_t idx_to_chk = idxd->batch_idx_ring[idxd->batch_idx_read];
		volatile struct rte_idxd_completion *comp_to_chk =
				(struct rte_idxd_completion *)&idxd->desc_ring[idx_to_chk];
		if (comp_to_chk->status == 0)
			break;
		/* avail points to one after the last one written */
		idxd->hdls_avail = (idx_to_chk + 1) & idxd->desc_ring_mask;
		idxd->batch_idx_read++;
		if (idxd->batch_idx_read > idxd->max_batches)
			idxd->batch_idx_read = 0;
	}

	if (idxd->cfg.hdls_disable) {
		n = (idxd->hdls_avail < idxd->hdls_read) ?
				(idxd->hdls_avail + idxd->desc_ring_mask + 1 - idxd->hdls_read) :
				(idxd->hdls_avail - idxd->hdls_read);
		idxd->hdls_read = idxd->hdls_avail;
		goto out;
	}

	for (n = 0, h_idx = idxd->hdls_read;
			n < max_ops && h_idx != idxd->hdls_avail; n++) {
		src_hdls[n] = idxd->hdl_ring[h_idx].src;
		dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
		if (++h_idx > idxd->desc_ring_mask)
			h_idx = 0;
	}
	idxd->hdls_read = h_idx;

out:
	idxd->xstats.completed += n;
	return n;
}

#endif /* _RTE_IDXD_RAWDEV_FNS_H_ */