raw/ioat: allow perform operations function to return error
[dpdk.git] drivers/raw/ioat/rte_ioat_rawdev_fns.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>
#include <rte_errno.h> /* rte_errno is set on enqueue/completion failures below */

/**
 * @internal
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
        uint32_t size;
        union {
                uint32_t control_raw;
                struct {
                        uint32_t int_enable: 1;
                        uint32_t src_snoop_disable: 1;
                        uint32_t dest_snoop_disable: 1;
                        uint32_t completion_update: 1;
                        uint32_t fence: 1;
                        uint32_t reserved2: 1;
                        uint32_t src_page_break: 1;
                        uint32_t dest_page_break: 1;
                        uint32_t bundle: 1;
                        uint32_t dest_dca: 1;
                        uint32_t hint: 1;
                        uint32_t reserved: 13;
                        uint32_t op: 8;
                } control;
        } u;
        uint64_t src_addr;
        uint64_t dest_addr;
        uint64_t next;
        uint64_t op_specific[4];
};

/**
 * @internal
 * Identify the data path to use.
 * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
 */
enum rte_ioat_dev_type {
        RTE_IOAT_DEV,
        RTE_IDXD_DEV,
};

/**
 * @internal
 * Some statistics for tracking; if fields are added or changed, update the xstats functions.
 */
struct rte_ioat_xstats {
        uint64_t enqueue_failed;
        uint64_t enqueued;
        uint64_t started;
        uint64_t completed;
};

/**
 * @internal
 * Structure representing an IOAT device instance
 */
struct rte_ioat_rawdev {
        /* common fields at the top - match those in rte_idxd_rawdev */
        enum rte_ioat_dev_type type;
        struct rte_ioat_xstats xstats;

        struct rte_rawdev *rawdev;
        const struct rte_memzone *mz;
        const struct rte_memzone *desc_mz;

        volatile uint16_t *doorbell __rte_cache_aligned;
        phys_addr_t status_addr;
        phys_addr_t ring_addr;

        unsigned short ring_size;
        bool hdls_disable;
        struct rte_ioat_generic_hw_desc *desc_ring;
        __m128i *hdls; /* completion handles for returning to user */

        unsigned short next_read;
        unsigned short next_write;

        /* to report completions, the device will write status back here */
        volatile uint64_t status __rte_cache_aligned;

        /* pointer to the register bar */
        volatile struct rte_ioat_registers *regs;
};

#define RTE_IOAT_CHANSTS_IDLE                   0x1
#define RTE_IOAT_CHANSTS_SUSPENDED              0x2
#define RTE_IOAT_CHANSTS_HALTED                 0x3
#define RTE_IOAT_CHANSTS_ARMED                  0x4

/*
 * Defines used in the data path for interacting with hardware.
 */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
        idxd_op_nop = 0,
        idxd_op_batch,
        idxd_op_drain,
        idxd_op_memmove,
        idxd_op_fill
};

#define IDXD_FLAG_FENCE                 (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)

#define IOAT_COMP_UPDATE_SHIFT  3
#define IOAT_CMD_OP_SHIFT       24
enum rte_ioat_ops {
        ioat_op_copy = 0,       /* Standard DMA Operation */
        ioat_op_fill            /* Block Fill */
};
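
/*
 * Illustrative sketch only, not part of the driver: shows how the opcode and
 * shift defines above combine into the control word of a generic hw
 * descriptor, mirroring what __ioat_write_desc() does later in this file.
 * The helper name and its parameters are hypothetical.
 */
static inline void
ioat_example_prep_copy_desc(struct rte_ioat_generic_hw_desc *desc,
                phys_addr_t src, phys_addr_t dst, uint32_t length,
                int request_completion)
{
        desc->size = length;
        /* opcode goes in the top byte; completion-update is bit 3 */
        desc->u.control_raw = (uint32_t)((ioat_op_copy << IOAT_CMD_OP_SHIFT) |
                        ((!!request_completion) << IOAT_COMP_UPDATE_SHIFT));
        desc->src_addr = src;
        desc->dest_addr = dst;
}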

/**
 * Hardware descriptor used by DSA hardware, for both bursts and
 * for individual operations.
 */
struct rte_idxd_hw_desc {
        uint32_t pasid;
        uint32_t op_flags;
        rte_iova_t completion;

        RTE_STD_C11
        union {
                rte_iova_t src;      /* source address for copy ops etc. */
                rte_iova_t desc_addr; /* descriptor pointer for batch */
        };
        rte_iova_t dst;

        uint32_t size;    /* length of data for op, or batch size */

        uint16_t intr_handle; /* completion interrupt handle */

        /* remaining 26 bytes are reserved */
        uint16_t __reserved[13];
} __rte_aligned(64);
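
/*
 * Illustrative sketch only: shows how the IDXD opcode and flag defines above
 * are packed into the op_flags field of a descriptor, in the same way as
 * __idxd_enqueue_copy() further down. The helper name is hypothetical.
 */
static inline void
idxd_example_prep_copy_desc(struct rte_idxd_hw_desc *desc,
                rte_iova_t src, rte_iova_t dst, uint32_t length)
{
        /* opcode in the top byte of op_flags, flag bits below it */
        desc->op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
                        IDXD_FLAG_CACHE_CONTROL;
        desc->src = src;
        desc->dst = dst;
        desc->size = length;
}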

/**
 * Completion record structure written back by DSA
 */
struct rte_idxd_completion {
        uint8_t status;
        uint8_t result;
        /* 16 bits of padding here */
        uint32_t completed_size; /* data length, or descriptors for batch */

        rte_iova_t fault_address;
        uint32_t invalid_flags;
} __rte_aligned(32);

#define BATCH_SIZE 64

/**
 * Structure used inside the driver for building up and submitting
 * a batch of operations to the DSA hardware.
 */
struct rte_idxd_desc_batch {
        struct rte_idxd_completion comp; /* the completion record for batch */

        uint16_t submitted;
        uint16_t op_count;
        uint16_t hdl_end;

        struct rte_idxd_hw_desc batch_desc;

        /* batches must always contain at least 2 descriptors,
         * so put a null descriptor at the start
         */
        struct rte_idxd_hw_desc null_desc;
        struct rte_idxd_hw_desc ops[BATCH_SIZE];
};
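
/*
 * Illustrative sketch only: one plausible way the batch descriptor above can
 * be pointed at its completion record and at the descriptor array. In the
 * driver proper this setup happens at device configure time, outside this
 * header; the helper name and the exact field choices are assumptions.
 */
static inline void
idxd_example_init_batch(struct rte_idxd_desc_batch *b)
{
        b->batch_desc.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |
                        IDXD_FLAG_COMPLETION_ADDR_VALID |
                        IDXD_FLAG_REQUEST_COMPLETION;
        /* completions for the whole batch land in the embedded record */
        b->batch_desc.completion = rte_mem_virt2iova(&b->comp);
        /* the batch starts at the null descriptor; its size (the null
         * descriptor plus the ops used) is only filled in at submit time,
         * see __idxd_perform_ops() below
         */
        b->batch_desc.desc_addr = rte_mem_virt2iova(&b->null_desc);
}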

/**
 * structure used to save the "handles" provided by the user to be
 * returned to the user on job completion.
 */
struct rte_idxd_user_hdl {
        uint64_t src;
        uint64_t dst;
};

/**
 * @internal
 * Structure representing an IDXD device instance
 */
struct rte_idxd_rawdev {
        enum rte_ioat_dev_type type;
        struct rte_ioat_xstats xstats;

        void *portal; /* address to write the batch descriptor */

        /* counters to track the batches and the individual op handles */
        uint16_t batch_ring_sz;  /* size of batch ring */
        uint16_t hdl_ring_sz;    /* size of the user hdl ring */

        uint16_t next_batch;     /* where we write descriptor ops */
        uint16_t next_completed; /* batch where we read completions */
        uint16_t next_ret_hdl;   /* the next user hdl to return */
        uint16_t last_completed_hdl; /* the last user hdl that has completed */
        uint16_t next_free_hdl;  /* where the handle for next op will go */
        uint16_t hdls_disable;   /* disable tracking completion handles */

        struct rte_idxd_user_hdl *hdl_ring;
        struct rte_idxd_desc_batch *batch_ring;
};

static __rte_always_inline int
__ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short read = ioat->next_read;
        unsigned short write = ioat->next_write;
        unsigned short mask = ioat->ring_size - 1;
        unsigned short space = mask + read - write;
        struct rte_ioat_generic_hw_desc *desc;

        if (space == 0) {
                ioat->xstats.enqueue_failed++;
                return 0;
        }

        ioat->next_write = write + 1;
        write &= mask;

        desc = &ioat->desc_ring[write];
        desc->size = length;
        /* set descriptor write-back every 16th descriptor */
        desc->u.control_raw = (uint32_t)((op << IOAT_CMD_OP_SHIFT) |
                        (!(write & 0xF) << IOAT_COMP_UPDATE_SHIFT));
        desc->src_addr = src;
        desc->dest_addr = dst;

        if (!ioat->hdls_disable)
                ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
                                        (int64_t)src_hdl);
        rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

        ioat->xstats.enqueued++;
        return 1;
}
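
/*
 * Illustrative sketch only: restates the free-space arithmetic used in
 * __ioat_write_desc() above. It relies on ring_size being a power of two and
 * on next_read/next_write increasing without masking until they are used,
 * and it keeps one slot unused so that read == write means an empty ring.
 * For example, with ring_size 64, read 10 and write 73 the ring is full:
 * space = 63 + 10 - 73 = 0. The helper name is hypothetical.
 */
static inline unsigned short
ioat_example_ring_space(unsigned short read, unsigned short write,
                unsigned short ring_size)
{
        /* mask + read - write == free slots, with one slot always unused */
        return (unsigned short)((ring_size - 1) + read - write);
}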

static __rte_always_inline int
__ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
                unsigned int length, uintptr_t dst_hdl)
{
        static const uintptr_t null_hdl;

        return __ioat_write_desc(dev_id, ioat_op_fill, pattern, dst, length,
                        null_hdl, dst_hdl);
}

/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        return __ioat_write_desc(dev_id, ioat_op_copy, src, dst, length,
                        src_hdl, dst_hdl);
}

/* add fence to last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short write = ioat->next_write;
        unsigned short mask = ioat->ring_size - 1;
        struct rte_ioat_generic_hw_desc *desc;

        write = (write - 1) & mask;
        desc = &ioat->desc_ring[write];

        desc->u.control.fence = 1;
        return 0;
}

/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline int
__ioat_perform_ops(int dev_id)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
                        .control.completion_update = 1;
        rte_compiler_barrier();
        *ioat->doorbell = ioat->next_write;
        ioat->xstats.started = ioat->xstats.enqueued;

        return 0;
}

/**
 * @internal
 * Returns the index of the last completed operation.
 */
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
        uint64_t status = ioat->status;

        /* The lower 3 bits hold the channel "transfer status": active, idle,
         * suspended, halted or armed. Masking with SUSPENDED | ARMED (0x6)
         * ignores bit 0, so *error is only non-zero for the suspended,
         * halted and armed states.
         */
        *error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
        /* the remaining bits give the address of the last completed
         * descriptor; convert it to an index into the 64-byte descriptor ring
         */
        return (status - ioat->ring_addr) >> 6;
}

/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short mask = (ioat->ring_size - 1);
        unsigned short read = ioat->next_read;
        unsigned short end_read, count;
        int error;
        int i = 0;

        end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
        count = (end_read - (read & mask)) & mask;

        if (error) {
                rte_errno = EIO;
                return -1;
        }

        if (ioat->hdls_disable) {
                read += count;
                goto end;
        }

        if (count > max_copies)
                count = max_copies;

        /* each hdls[] entry holds the (src, dst) handle pair for one op;
         * unpack two entries at a time into the separate output arrays
         */
        for (; i < count - 1; i += 2, read += 2) {
                __m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
                __m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

                _mm_storeu_si128((__m128i *)&src_hdls[i],
                                _mm_unpacklo_epi64(hdls0, hdls1));
                _mm_storeu_si128((__m128i *)&dst_hdls[i],
                                _mm_unpackhi_epi64(hdls0, hdls1));
        }
        for (; i < count; i++, read++) {
                uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
                src_hdls[i] = hdls[0];
                dst_hdls[i] = hdls[1];
        }

end:
        ioat->next_read = read;
        ioat->xstats.completed += count;
        return count;
}

static __rte_always_inline int
__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,
                const struct rte_idxd_user_hdl *hdl)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

        /* check for room in the handle ring */
        if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
                goto failed;

        /* check for space in current batch */
        if (b->op_count >= BATCH_SIZE)
                goto failed;

        /* check that we can actually use the current batch */
        if (b->submitted)
                goto failed;

        /* write the descriptor */
        b->ops[b->op_count++] = *desc;

        /* store the completion details */
        if (!idxd->hdls_disable)
                idxd->hdl_ring[idxd->next_free_hdl] = *hdl;
        if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
                idxd->next_free_hdl = 0;

        idxd->xstats.enqueued++;
        return 1;

failed:
        idxd->xstats.enqueue_failed++;
        rte_errno = ENOSPC;
        return 0;
}

static __rte_always_inline int
__idxd_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst,
                unsigned int length, uintptr_t dst_hdl)
{
        const struct rte_idxd_hw_desc desc = {
                        .op_flags = (idxd_op_fill << IDXD_CMD_OP_SHIFT) |
                                IDXD_FLAG_CACHE_CONTROL,
                        .src = pattern,
                        .dst = dst,
                        .size = length
        };
        const struct rte_idxd_user_hdl hdl = {
                        .dst = dst_hdl
        };
        return __idxd_write_desc(dev_id, &desc, &hdl);
}

static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        const struct rte_idxd_hw_desc desc = {
                        .op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
                                IDXD_FLAG_CACHE_CONTROL,
                        .src = src,
                        .dst = dst,
                        .size = length
        };
        const struct rte_idxd_user_hdl hdl = {
                        .src = src_hdl,
                        .dst = dst_hdl
        };
        return __idxd_write_desc(dev_id, &desc, &hdl);
}

static __rte_always_inline int
__idxd_fence(int dev_id)
{
        static const struct rte_idxd_hw_desc fence = {
                        .op_flags = IDXD_FLAG_FENCE
        };
        static const struct rte_idxd_user_hdl null_hdl;
        return __idxd_write_desc(dev_id, &fence, &null_hdl);
}

/* issue a movdir64b instruction: a 64-byte direct store of *src to the
 * device portal at dst. The instruction is emitted as raw bytes
 * (66 0f 38 f8 /r) for assemblers that lack the mnemonic, with the
 * destination address in rax ("a") and the source memory operand in rdx ("d").
 */
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const void *src)
{
        asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
                        :
                        : "a" (dst), "d" (src));
}

static __rte_always_inline int
__idxd_perform_ops(int dev_id)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

        if (b->submitted || b->op_count == 0)
                return 0;
        b->hdl_end = idxd->next_free_hdl;
        b->comp.status = 0;
        b->submitted = 1;
        /* the batch size includes the leading null descriptor */
        b->batch_desc.size = b->op_count + 1;
        __idxd_movdir64b(idxd->portal, &b->batch_desc);

        if (++idxd->next_batch == idxd->batch_ring_sz)
                idxd->next_batch = 0;
        idxd->xstats.started = idxd->xstats.enqueued;
        return 0;
}

static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];
        uint16_t h_idx = idxd->next_ret_hdl;
        int n = 0;

        while (b->submitted && b->comp.status != 0) {
                idxd->last_completed_hdl = b->hdl_end;
                b->submitted = 0;
                b->op_count = 0;
                if (++idxd->next_completed == idxd->batch_ring_sz)
                        idxd->next_completed = 0;
                b = &idxd->batch_ring[idxd->next_completed];
        }

        if (!idxd->hdls_disable)
                for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {
                        src_hdls[n] = idxd->hdl_ring[h_idx].src;
                        dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
                        if (++h_idx == idxd->hdl_ring_sz)
                                h_idx = 0;
                }
        else
                while (h_idx != idxd->last_completed_hdl) {
                        n++;
                        if (++h_idx == idxd->hdl_ring_sz)
                                h_idx = 0;
                }

        idxd->next_ret_hdl = h_idx;

        idxd->xstats.completed += n;
        return n;
}

static inline int
rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
                unsigned int len, uintptr_t dst_hdl)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
        else
                return __ioat_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
}

static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_enqueue_copy(dev_id, src, dst, length,
                                src_hdl, dst_hdl);
        else
                return __ioat_enqueue_copy(dev_id, src, dst, length,
                                src_hdl, dst_hdl);
}

static inline int
rte_ioat_fence(int dev_id)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_fence(dev_id);
        else
                return __ioat_fence(dev_id);
}

static inline int
rte_ioat_perform_ops(int dev_id)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_perform_ops(dev_id);
        else
                return __ioat_perform_ops(dev_id);
}

static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_completed_ops(dev_id, max_copies,
                                src_hdls, dst_hdls);
        else
                return __ioat_completed_ops(dev_id, max_copies,
                                src_hdls, dst_hdls);
}
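
/*
 * Illustrative usage sketch only, not part of the public API: enqueues a
 * single copy with the inline functions above, starts it, and busy-waits for
 * its completion. The function name, the use of the buffer addresses as
 * completion handles and the polling loop are assumptions for the example;
 * real applications would normally batch many enqueues per doorbell ring.
 */
static inline int
ioat_example_copy_sync(int dev_id, phys_addr_t src, phys_addr_t dst,
                unsigned int length)
{
        uintptr_t src_hdl, dst_hdl;
        int ret;

        /* the handles are arbitrary tokens returned again on completion */
        if (rte_ioat_enqueue_copy(dev_id, src, dst, length,
                        (uintptr_t)src, (uintptr_t)dst) != 1)
                return -1;

        /* ring the doorbell to start the enqueued operation */
        if (rte_ioat_perform_ops(dev_id) < 0)
                return -1;

        /* poll until the operation completes or an error is reported */
        do {
                ret = rte_ioat_completed_ops(dev_id, 1, &src_hdl, &dst_hdl);
        } while (ret == 0);

        return ret < 0 ? -1 : 0;
}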

static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */