/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>

/**
 * @internal
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable: 1;
			uint32_t src_snoop_disable: 1;
			uint32_t dest_snoop_disable: 1;
			uint32_t completion_update: 1;
			uint32_t fence: 1;
			uint32_t reserved2: 1;
			uint32_t src_page_break: 1;
			uint32_t dest_page_break: 1;
			uint32_t bundle: 1;
			uint32_t dest_dca: 1;
			uint32_t hint: 1;
			uint32_t reserved: 13;
			uint32_t op: 8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t op_specific[4];
};
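
/*
 * Illustrative addition (not in the original header), assuming a toolchain
 * that accepts C11 _Static_assert: the hardware descriptor must be exactly
 * 64 bytes, which the completion-address arithmetic further down relies on.
 */
_Static_assert(sizeof(struct rte_ioat_generic_hw_desc) == 64,
		"IOAT hardware descriptors must be 64 bytes");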

/**
 * @internal
 * Identify the data path to use.
 * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
 */
enum rte_ioat_dev_type {
	RTE_IOAT_DEV,
	RTE_IDXD_DEV,
};

/**
 * @internal
 * Structure representing an IOAT device instance
 */
struct rte_ioat_rawdev {
	enum rte_ioat_dev_type type;
	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile uint16_t *doorbell;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */

	unsigned short next_read;
	unsigned short next_write;

	/* some statistics for tracking, if added/changed update xstats fns */
	uint64_t enqueue_failed __rte_cache_aligned;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;

	/* pointer to the register bar */
	volatile struct rte_ioat_registers *regs;
};

#define RTE_IOAT_CHANSTS_IDLE			0x1
#define RTE_IOAT_CHANSTS_SUSPENDED		0x2
#define RTE_IOAT_CHANSTS_HALTED			0x3
#define RTE_IOAT_CHANSTS_ARMED			0x4

/*
 * Defines used in the data path for interacting with hardware.
 */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
	idxd_op_nop = 0,
	idxd_op_batch,
	idxd_op_drain,
	idxd_op_memmove,
	idxd_op_fill
};

#define IDXD_FLAG_FENCE                 (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)

/**
 * Hardware descriptor used by DSA hardware, for both bursts and
 * for individual operations.
 */
struct rte_idxd_hw_desc {
	uint32_t pasid;
	uint32_t op_flags;
	rte_iova_t completion;

	RTE_STD_C11
	union {
		rte_iova_t src;      /* source address for copy ops etc. */
		rte_iova_t desc_addr; /* descriptor pointer for batch */
	};
	rte_iova_t dst;

	uint32_t size;    /* length of data for op, or batch size */

	/* 28 bytes of padding here */
} __rte_aligned(64);

/**
 * Completion record structure written back by DSA
 */
struct rte_idxd_completion {
	uint8_t status;
	uint8_t result;
	/* 16 bits of padding here */
	uint32_t completed_size; /* data length, or descriptors for batch */

	rte_iova_t fault_address;
	uint32_t invalid_flags;
} __rte_aligned(32);

#define BATCH_SIZE 64

/**
 * Structure used inside the driver for building up and submitting
 * a batch of operations to the DSA hardware.
 */
struct rte_idxd_desc_batch {
	struct rte_idxd_completion comp; /* the completion record for batch */

	uint16_t submitted;
	uint16_t op_count;
	uint16_t hdl_end;

	struct rte_idxd_hw_desc batch_desc;

	/* batches must contain at least 2 descriptors, so put a null at the start */
	struct rte_idxd_hw_desc null_desc;
	struct rte_idxd_hw_desc ops[BATCH_SIZE];
};
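
/*
 * Illustrative sketch (not in the original header): how the driver's
 * configure path could pre-build "batch_desc" so that submitting a batch is
 * a single 64-byte write of that one descriptor. The function name is
 * hypothetical; the real initialisation lives in the PMD setup code, and is
 * only assumed to follow this shape.
 */
static inline void
__idxd_example_init_batch(struct rte_idxd_desc_batch *b)
{
	/* the batch descriptor points at the descriptor array, starting with
	 * the null descriptor, and asks for a completion write-back
	 */
	b->batch_desc.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |
			IDXD_FLAG_COMPLETION_ADDR_VALID |
			IDXD_FLAG_REQUEST_COMPLETION;
	b->batch_desc.desc_addr = rte_mem_virt2iova(&b->null_desc);
	b->batch_desc.completion = rte_mem_virt2iova(&b->comp);
}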

/**
 * Structure used to save the "handles" provided by the user to be
 * returned to the user on job completion.
 */
struct rte_idxd_user_hdl {
	uint64_t src;
	uint64_t dst;
};

/**
 * @internal
 * Structure representing an IDXD device instance
 */
struct rte_idxd_rawdev {
	enum rte_ioat_dev_type type;
	void *portal; /* address to write the batch descriptor */

	/* counters to track the batches and the individual op handles */
	uint16_t batch_ring_sz;  /* size of batch ring */
	uint16_t hdl_ring_sz;    /* size of the user hdl ring */

	uint16_t next_batch;     /* where we write descriptor ops */
	uint16_t next_completed; /* batch where we read completions */
	uint16_t next_ret_hdl;   /* the next user hdl to return */
	uint16_t last_completed_hdl; /* the last user hdl that has completed */
	uint16_t next_free_hdl;  /* where the handle for next op will go */
	uint16_t hdls_disable;   /* disable tracking completion handles */

	struct rte_idxd_user_hdl *hdl_ring;
	struct rte_idxd_desc_batch *batch_ring;
};

/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	if (space == 0) {
		ioat->enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back every 16th descriptor */
	desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
					(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->enqueued++;
	return 1;
}

/* add fence to last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	struct rte_ioat_generic_hw_desc *desc;

	write = (write - 1) & mask;
	desc = &ioat->desc_ring[write];

	desc->u.control.fence = 1;
	return 0;
}

/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline void
__ioat_perform_ops(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
	rte_compiler_barrier();
	*ioat->doorbell = ioat->next_write;
	ioat->started = ioat->enqueued;
}

/**
 * @internal
 * Returns the index of the last completed operation.
 */
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status": active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
	/* the upper bits hold the address of the last completed 64-byte
	 * descriptor, so its offset from the ring base gives the index
	 */
	return (status - ioat->ring_addr) >> 6;
}

/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;

	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->completed += count;
	return count;
}

static __rte_always_inline int
__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,
		const struct rte_idxd_user_hdl *hdl)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	/* check for room in the handle ring */
	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
		goto failed;

	/* check for space in current batch */
	if (b->op_count >= BATCH_SIZE)
		goto failed;

	/* check that we can actually use the current batch */
	if (b->submitted)
		goto failed;

	/* write the descriptor */
	b->ops[b->op_count++] = *desc;

	/* store the completion details */
	if (!idxd->hdls_disable)
		idxd->hdl_ring[idxd->next_free_hdl] = *hdl;
	if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
		idxd->next_free_hdl = 0;

	return 1;

failed:
	rte_errno = ENOSPC;
	return 0;
}

static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	const struct rte_idxd_hw_desc desc = {
			.op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
				IDXD_FLAG_CACHE_CONTROL,
			.src = src,
			.dst = dst,
			.size = length
	};
	const struct rte_idxd_user_hdl hdl = {
			.src = src_hdl,
			.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id, &desc, &hdl);
}
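
/*
 * Illustrative sketch (not part of this header's API; a similar helper only
 * appears in later DPDK revisions): a fill operation is composed the same
 * way as the copy above, using idxd_op_fill. For a DSA fill the 64-bit
 * pattern is carried where the source address would normally go, so it is
 * placed in the descriptor's "src" union member.
 */
static __rte_always_inline int
__idxd_example_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst,
		unsigned int length, uintptr_t dst_hdl)
{
	const struct rte_idxd_hw_desc desc = {
			.op_flags = (idxd_op_fill << IDXD_CMD_OP_SHIFT) |
				IDXD_FLAG_CACHE_CONTROL,
			.src = pattern, /* fill pattern, not an address */
			.dst = dst,
			.size = length
	};
	const struct rte_idxd_user_hdl hdl = {
			.dst = dst_hdl /* no source buffer to hand back */
	};
	return __idxd_write_desc(dev_id, &desc, &hdl);
}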

static __rte_always_inline int
__idxd_fence(int dev_id)
{
	static const struct rte_idxd_hw_desc fence = {
			.op_flags = IDXD_FLAG_FENCE
	};
	static const struct rte_idxd_user_hdl null_hdl;
	return __idxd_write_desc(dev_id, &fence, &null_hdl);
}

static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const void *src)
{
	/* the memory clobber stops the compiler reordering the descriptor
	 * stores in *src past this 64-byte write to the device portal
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
			:
			: "a" (dst), "d" (src)
			: "memory");
}
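
/*
 * Illustrative alternative (not used by the driver): on toolchains built
 * with MOVDIR64B support (gcc/clang with -mmovdir64b), the raw opcode bytes
 * above can be expressed with the _movdir64b() intrinsic from <immintrin.h>,
 * pulled in here via <x86intrin.h>. Shown only as a hedged sketch; the
 * function name is hypothetical.
 */
#ifdef __MOVDIR64B__
static __rte_always_inline void
__idxd_example_movdir64b_intrinsic(volatile void *dst, const void *src)
{
	/* writes 64 bytes from src to the destination as one posted write */
	_movdir64b((void *)(uintptr_t)dst, src);
}
#endif /* __MOVDIR64B__ */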

static __rte_always_inline void
__idxd_perform_ops(int dev_id)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	if (b->submitted || b->op_count == 0)
		return;
	b->hdl_end = idxd->next_free_hdl;
	b->comp.status = 0;
	b->submitted = 1;
	/* the +1 accounts for the leading null descriptor in the batch */
	b->batch_desc.size = b->op_count + 1;
	__idxd_movdir64b(idxd->portal, &b->batch_desc);

	if (++idxd->next_batch == idxd->batch_ring_sz)
		idxd->next_batch = 0;
}

static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];
	uint16_t h_idx = idxd->next_ret_hdl;
	int n = 0;

	while (b->submitted && b->comp.status != 0) {
		idxd->last_completed_hdl = b->hdl_end;
		b->submitted = 0;
		b->op_count = 0;
		if (++idxd->next_completed == idxd->batch_ring_sz)
			idxd->next_completed = 0;
		b = &idxd->batch_ring[idxd->next_completed];
	}

	if (!idxd->hdls_disable)
		for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {
			src_hdls[n] = idxd->hdl_ring[h_idx].src;
			dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}
	else
		while (h_idx != idxd->last_completed_hdl) {
			n++;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}

	idxd->next_ret_hdl = h_idx;

	return n;
}

static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
	else
		return __ioat_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
}

static inline int
rte_ioat_fence(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_fence(dev_id);
	else
		return __ioat_fence(dev_id);
}

static inline void
rte_ioat_perform_ops(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_perform_ops(dev_id);
	else
		return __ioat_perform_ops(dev_id);
}

static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
	else
		return __ioat_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
}

static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

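/*
 * Illustrative usage sketch (not part of the header): the typical data path
 * is enqueue -> perform_ops -> poll completed_ops. The dev_id is assumed to
 * refer to an already configured and started rawdev instance, and the two
 * buffers are assumed to come from rte_malloc() or another DPDK-registered
 * area so that rte_mem_virt2iova() can translate them.
 */
static inline void
ioat_example_copy_poll(int dev_id, void *src_buf, void *dst_buf,
		unsigned int len)
{
	uintptr_t completed_src[8], completed_dst[8];
	int n;

	/* hand the copy to the device, using the buffer pointers as handles */
	if (rte_ioat_enqueue_copy(dev_id,
			rte_mem_virt2iova(src_buf),
			rte_mem_virt2iova(dst_buf),
			len, (uintptr_t)src_buf, (uintptr_t)dst_buf) != 1)
		return; /* no space in the ring; try again later */

	/* ring the doorbell so the hardware starts on the enqueued copy */
	rte_ioat_perform_ops(dev_id);

	/* poll for the completion; a negative return reports a device error */
	do {
		n = rte_ioat_completed_ops(dev_id, 8,
				completed_src, completed_dst);
	} while (n == 0);
}
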
#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */