raw/ioat: rework SW ring layout
drivers/raw/ioat/rte_idxd_rawdev_fns.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */
#ifndef _RTE_IDXD_RAWDEV_FNS_H_
#define _RTE_IDXD_RAWDEV_FNS_H_

/**
 * @file
 * This header file contains the implementation of the various ioat
 * rawdev functions for DSA hardware. The API specification and key
 * public structures are defined in "rte_ioat_rawdev.h".
 *
 * This file should not be included directly, but instead applications should
 * include "rte_ioat_rawdev.h", which then includes this file - and the
 * IOAT/CBDMA equivalent header - in turn.
 */
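
/*
 * Illustrative call flow (a sketch only; dev_id, the iova values and the
 * handle arguments are assumed to come from the application): the public
 * functions in "rte_ioat_rawdev.h" dispatch to the __idxd_* implementations
 * below when the device is a DSA instance.
 *
 *      // enqueue a copy, storing app pointers as completion handles
 *      if (rte_ioat_enqueue_copy(dev_id, src_iova, dst_iova, length,
 *                      (uintptr_t)src_buf, (uintptr_t)dst_buf) == 0)
 *              return; // ring full, rte_errno == ENOSPC
 *      // kick the accumulated batch off to hardware
 *      rte_ioat_perform_ops(dev_id);
 *      // later: gather the handles for completed copies
 *      n = rte_ioat_completed_ops(dev_id, BURST_SZ, src_hdls, dst_hdls);
 */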

#include <stdint.h>

/*
 * Defines used in the data path for interacting with IDXD hardware.
 */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
        idxd_op_nop = 0,
        idxd_op_batch,
        idxd_op_drain,
        idxd_op_memmove,
        idxd_op_fill
};

#define IDXD_FLAG_FENCE                 (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)
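
/*
 * A descriptor's 32-bit op_flags word is the opcode in the top byte, OR'd
 * with the flag bits above; for example, the copy helper below builds
 * (idxd_op_memmove << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL, and
 * __idxd_write_desc() then adds IDXD_FLAG_COMPLETION_ADDR_VALID.
 */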

/*
 * Defines used in the data path for interacting with IOAT/CBDMA hardware.
 */
#define IOAT_COMP_UPDATE_SHIFT  3
#define IOAT_CMD_OP_SHIFT       24
enum rte_ioat_ops {
        ioat_op_copy = 0,       /* Standard DMA Operation */
        ioat_op_fill            /* Block Fill */
};

/**
 * Hardware descriptor used by DSA hardware, for both batches and
 * for individual operations.
 */
struct rte_idxd_hw_desc {
        uint32_t pasid;
        uint32_t op_flags;
        rte_iova_t completion;

        RTE_STD_C11
        union {
                rte_iova_t src;      /* source address for copy ops etc. */
                rte_iova_t desc_addr; /* descriptor pointer for batch */
        };
        rte_iova_t dst;

        uint32_t size;    /* length of data for op, or batch size */

        uint16_t intr_handle; /* completion interrupt handle */

        /* remaining 26 bytes are reserved */
        uint16_t __reserved[13];
} __rte_aligned(64);
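
/*
 * Layout check (informational): 4 + 4 + 8 (pasid, op_flags, completion)
 * + 8 (src/desc_addr union) + 8 (dst) + 4 (size) + 2 (intr_handle)
 * + 26 (reserved) = 64 bytes, i.e. exactly one cacheline, matching the
 * 64-byte store done by __idxd_movdir64b() below.
 */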

/**
 * Completion record structure written back by DSA
 */
struct rte_idxd_completion {
        uint8_t status;
        uint8_t result;
        /* 16 bits of padding here */
        uint32_t completed_size; /* data length, or descriptor count for batch */

        rte_iova_t fault_address;
        uint32_t invalid_flags;
} __rte_aligned(32);
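
/*
 * Note: the driver reuses each descriptor's own ring slot as its completion
 * address (see __idxd_write_desc() below), so the completion record
 * overwrites the start of the descriptor that produced it. A non-zero
 * status byte in that slot is what __idxd_completed_ops() polls for to
 * detect a finished batch.
 */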

/**
 * Structure used to save the "handles" provided by the user to be
 * returned to the user on job completion.
 */
struct rte_idxd_user_hdl {
        uint64_t src;
        uint64_t dst;
};

/**
 * @internal
 * Structure representing an IDXD device instance
 */
struct rte_idxd_rawdev {
        enum rte_ioat_dev_type type;
        struct rte_ioat_xstats xstats;

        void *portal; /* address to write the batch descriptor */

        struct rte_ioat_rawdev_config cfg;
        rte_iova_t desc_iova; /* base address of desc ring, needed for completions */

        /* counters to track the batches */
        unsigned short max_batches;
        unsigned short batch_idx_read;
        unsigned short batch_idx_write;
        unsigned short *batch_idx_ring; /* store where each batch ends */

        /* track descriptors and handles */
        unsigned short desc_ring_mask;
        unsigned short hdls_avail; /* handles for ops completed */
        unsigned short hdls_read; /* the read pointer for hdls/desc rings */
        unsigned short batch_start; /* start+size == write pointer for hdls/desc */
        unsigned short batch_size;

        struct rte_idxd_hw_desc *desc_ring;
        struct rte_idxd_user_hdl *hdl_ring;
};
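
/*
 * Ring invariants assumed by the functions below: the descriptor ring size
 * (desc_ring_mask + 1) is a power of two, and batches are written without
 * wrapping - write_idx = batch_start + batch_size may run past the mask, so
 * the ring is assumed to be allocated with headroom beyond mask + 1 slots
 * (the allocation itself is done in the driver proper). The batch_idx_ring
 * holds max_batches + 1 entries, which is why its read/write indexes wrap
 * after max_batches rather than max_batches - 1.
 */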

static __rte_always_inline rte_iova_t
__desc_idx_to_iova(struct rte_idxd_rawdev *idxd, uint16_t n)
{
        return idxd->desc_iova + (n * sizeof(struct rte_idxd_hw_desc));
}

static __rte_always_inline int
__idxd_write_desc(int dev_id,
                const uint32_t op_flags,
                const rte_iova_t src,
                const rte_iova_t dst,
                const uint32_t size,
                const struct rte_idxd_user_hdl *hdl)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        uint16_t write_idx = idxd->batch_start + idxd->batch_size;

        /* first check batch ring space then desc ring space */
        if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
                        idxd->batch_idx_write + 1 == idxd->batch_idx_read)
                goto failed;
        if (((write_idx + 1) & idxd->desc_ring_mask) == idxd->hdls_read)
                goto failed;

        /* write desc and handle. Note, descriptors don't wrap */
        idxd->desc_ring[write_idx].pasid = 0;
        idxd->desc_ring[write_idx].op_flags = op_flags | IDXD_FLAG_COMPLETION_ADDR_VALID;
        idxd->desc_ring[write_idx].completion = __desc_idx_to_iova(idxd, write_idx);
        idxd->desc_ring[write_idx].src = src;
        idxd->desc_ring[write_idx].dst = dst;
        idxd->desc_ring[write_idx].size = size;

        idxd->hdl_ring[write_idx & idxd->desc_ring_mask] = *hdl;
        idxd->batch_size++;

        idxd->xstats.enqueued++;

        rte_prefetch0_write(&idxd->desc_ring[write_idx + 1]);
        return 1;

failed:
        idxd->xstats.enqueue_failed++;
        rte_errno = ENOSPC;
        return 0;
}
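
/*
 * Worked example of the descriptor-ring space check above: with
 * desc_ring_mask == 0xFFF (4096 slots) and hdls_read == 0, an enqueue at
 * write_idx == 0xFFF gives ((0xFFF + 1) & 0xFFF) == 0 == hdls_read and is
 * rejected, so one slot is always kept free to distinguish a full ring
 * from an empty one.
 */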

static __rte_always_inline int
__idxd_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst,
                unsigned int length, uintptr_t dst_hdl)
{
        const struct rte_idxd_user_hdl hdl = {
                        .dst = dst_hdl
        };
        return __idxd_write_desc(dev_id,
                        (idxd_op_fill << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL,
                        pattern, dst, length, &hdl);
}

static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        const struct rte_idxd_user_hdl hdl = {
                        .src = src_hdl,
                        .dst = dst_hdl
        };
        return __idxd_write_desc(dev_id,
                        (idxd_op_memmove << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL,
                        src, dst, length, &hdl);
}

static __rte_always_inline int
__idxd_fence(int dev_id)
{
        static const struct rte_idxd_user_hdl null_hdl;
        /* only op field needs filling - zero src, dst and length */
        return __idxd_write_desc(dev_id, IDXD_FLAG_FENCE, 0, 0, 0, &null_hdl);
}

static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const struct rte_idxd_hw_desc *src)
{
        asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
                        :
                        : "a" (dst), "d" (src)
                        : "memory");
}
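
/*
 * The .byte sequence above is the binary encoding of MOVDIR64B
 * (66 0F 38 F8), which moves 64 bytes from src as a single atomic store to
 * the device portal at dst; raw bytes are used so the code builds with
 * toolchains whose assemblers predate the mnemonic.
 */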

static __rte_always_inline int
__idxd_perform_ops(int dev_id)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        uint16_t comp_idx;

        /* nothing to submit; return before touching any descriptor */
        if (idxd->batch_size == 0)
                return 0;

        /* write completion to last desc in the batch */
        comp_idx = idxd->batch_start + idxd->batch_size - 1;
        if (comp_idx > idxd->desc_ring_mask) {
                comp_idx &= idxd->desc_ring_mask;
                *((uint64_t *)&idxd->desc_ring[comp_idx]) = 0; /* zero start of desc */
        }

        _mm_sfence(); /* fence before writing desc to device */
        if (idxd->batch_size > 1) {
                struct rte_idxd_hw_desc batch_desc = {
                                .op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |
                                        IDXD_FLAG_COMPLETION_ADDR_VALID |
                                        IDXD_FLAG_REQUEST_COMPLETION,
                                .desc_addr = __desc_idx_to_iova(idxd, idxd->batch_start),
                                .completion = __desc_idx_to_iova(idxd, comp_idx),
                                .size = idxd->batch_size,
                };

                __idxd_movdir64b(idxd->portal, &batch_desc);
        } else {
                /* batch size of 1 is not allowed by HW, so submit the
                 * single descriptor directly; comp_idx == batch_start */
                struct rte_idxd_hw_desc *desc = &idxd->desc_ring[comp_idx];
                desc->op_flags |= IDXD_FLAG_COMPLETION_ADDR_VALID |
                                IDXD_FLAG_REQUEST_COMPLETION;
                desc->completion = __desc_idx_to_iova(idxd, comp_idx);

                __idxd_movdir64b(idxd->portal, desc);
        }

        idxd->xstats.started += idxd->batch_size;

        idxd->batch_start += idxd->batch_size;
        idxd->batch_start &= idxd->desc_ring_mask;
        idxd->batch_size = 0;

        idxd->batch_idx_ring[idxd->batch_idx_write++] = comp_idx;
        if (idxd->batch_idx_write > idxd->max_batches)
                idxd->batch_idx_write = 0;

        return 0;
}
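
/*
 * Example of the submission logic above (illustrative values): with
 * batch_start == 8 and batch_size == 4, comp_idx == 11 and a batch
 * descriptor is written to the portal pointing at descriptor 8 with
 * size 4, directing the completion record to descriptor 11's slot;
 * batch_start then advances to 12 and comp_idx 11 is recorded in
 * batch_idx_ring for __idxd_completed_ops() to poll.
 */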

static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short n, h_idx;

        while (idxd->batch_idx_read != idxd->batch_idx_write) {
                uint16_t idx_to_chk = idxd->batch_idx_ring[idxd->batch_idx_read];
                volatile struct rte_idxd_completion *comp_to_chk =
                                (struct rte_idxd_completion *)&idxd->desc_ring[idx_to_chk];
                if (comp_to_chk->status == 0)
                        break;
                /* avail points to one after the last one written */
                idxd->hdls_avail = (idx_to_chk + 1) & idxd->desc_ring_mask;
                idxd->batch_idx_read++;
                if (idxd->batch_idx_read > idxd->max_batches)
                        idxd->batch_idx_read = 0;
        }

        if (idxd->cfg.hdls_disable) {
                n = (idxd->hdls_avail < idxd->hdls_read) ?
                                (idxd->hdls_avail + idxd->desc_ring_mask + 1 - idxd->hdls_read) :
                                (idxd->hdls_avail - idxd->hdls_read);
                idxd->hdls_read = idxd->hdls_avail;
                goto out;
        }

        for (n = 0, h_idx = idxd->hdls_read;
                        n < max_ops && h_idx != idxd->hdls_avail; n++) {
                src_hdls[n] = idxd->hdl_ring[h_idx].src;
                dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
                if (++h_idx > idxd->desc_ring_mask)
                        h_idx = 0;
        }
        idxd->hdls_read = h_idx;

out:
        idxd->xstats.completed += n;
        return n;
}
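
/*
 * Usage note (a sketch; BURST_SZ and the arrays are the caller's choice,
 * with BURST_SZ at most 255 since max_ops is a uint8_t):
 *
 *      uintptr_t src_hdls[BURST_SZ], dst_hdls[BURST_SZ];
 *      int n = __idxd_completed_ops(dev_id, BURST_SZ, src_hdls, dst_hdls);
 *
 * returns up to max_ops completed handle pairs. When handles are disabled
 * in the device config, the count of all newly completed operations is
 * returned instead and the arrays are not written.
 */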

#endif /* _RTE_IDXD_RAWDEV_FNS_H_ */