raw/ioat: add xstats tracking for idxd device
drivers/raw/ioat/rte_ioat_rawdev_fns.h (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>

/**
 * @internal
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
        uint32_t size;
        union {
                uint32_t control_raw;
                struct {
                        uint32_t int_enable: 1;
                        uint32_t src_snoop_disable: 1;
                        uint32_t dest_snoop_disable: 1;
                        uint32_t completion_update: 1;
                        uint32_t fence: 1;
                        uint32_t reserved2: 1;
                        uint32_t src_page_break: 1;
                        uint32_t dest_page_break: 1;
                        uint32_t bundle: 1;
                        uint32_t dest_dca: 1;
                        uint32_t hint: 1;
                        uint32_t reserved: 13;
                        uint32_t op: 8;
                } control;
        } u;
        uint64_t src_addr;
        uint64_t dest_addr;
        uint64_t next;
        uint64_t op_specific[4];
};
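
/*
 * Note: the fields above add up to 64 bytes (0x40), the fixed size of an ioat
 * hardware descriptor; __ioat_get_last_completed() below relies on this when
 * it converts a completed-descriptor address into a ring index with ">> 6".
 */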

/**
 * @internal
 * Identify the data path to use.
 * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
 */
enum rte_ioat_dev_type {
        RTE_IOAT_DEV,
        RTE_IDXD_DEV,
};

/**
 * @internal
 * Some statistics for tracking; if these are added to or changed, update the
 * xstats functions accordingly.
 */
struct rte_ioat_xstats {
        uint64_t enqueue_failed;
        uint64_t enqueued;
        uint64_t started;
        uint64_t completed;
};
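
/*
 * How the data-path functions below use these counters:
 *   enqueued       - incremented for each op accepted by an enqueue function
 *   enqueue_failed - incremented each time an enqueue is rejected
 *   started        - set to the current "enqueued" count by the perform_ops
 *                    functions when ops are submitted to hardware
 *   completed      - incremented by the number of ops reported back by the
 *                    completed_ops functions
 */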

/**
 * @internal
 * Structure representing an IOAT device instance
 */
struct rte_ioat_rawdev {
        /* common fields at the top - match those in rte_idxd_rawdev */
        enum rte_ioat_dev_type type;
        struct rte_ioat_xstats xstats;

        struct rte_rawdev *rawdev;
        const struct rte_memzone *mz;
        const struct rte_memzone *desc_mz;

        volatile uint16_t *doorbell __rte_cache_aligned;
        phys_addr_t status_addr;
        phys_addr_t ring_addr;

        unsigned short ring_size;
        bool hdls_disable;
        struct rte_ioat_generic_hw_desc *desc_ring;
        __m128i *hdls; /* completion handles for returning to user */

        unsigned short next_read;
        unsigned short next_write;

        /* to report completions, the device will write status back here */
        volatile uint64_t status __rte_cache_aligned;

        /* pointer to the register bar */
        volatile struct rte_ioat_registers *regs;
};
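
/*
 * Note: the ioat data path masks ring indexes with (ring_size - 1), so the
 * ring size is assumed to be a power of two.
 */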

#define RTE_IOAT_CHANSTS_IDLE                   0x1
#define RTE_IOAT_CHANSTS_SUSPENDED              0x2
#define RTE_IOAT_CHANSTS_HALTED                 0x3
#define RTE_IOAT_CHANSTS_ARMED                  0x4

/*
 * Defines used in the data path for interacting with hardware.
 */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
        idxd_op_nop = 0,
        idxd_op_batch,
        idxd_op_drain,
        idxd_op_memmove,
        idxd_op_fill
};

#define IDXD_FLAG_FENCE                 (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)
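
/*
 * The op_flags field of a descriptor packs the operation code in its top byte
 * with the flag bits below it, e.g. __idxd_enqueue_copy() below builds
 * (idxd_op_memmove << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL.
 */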

/**
 * Hardware descriptor used by DSA hardware, for both bursts and
 * for individual operations.
 */
struct rte_idxd_hw_desc {
        uint32_t pasid;
        uint32_t op_flags;
        rte_iova_t completion;

        RTE_STD_C11
        union {
                rte_iova_t src;      /* source address for copy ops etc. */
                rte_iova_t desc_addr; /* descriptor pointer for batch */
        };
        rte_iova_t dst;

        uint32_t size;    /* length of data for op, or batch size */

        /* 28 bytes of padding here */
} __rte_aligned(64);

/**
 * Completion record structure written back by DSA
 */
struct rte_idxd_completion {
        uint8_t status;
        uint8_t result;
        /* 16-bits pad here */
        uint32_t completed_size; /* data length, or descriptors for batch */

        rte_iova_t fault_address;
        uint32_t invalid_flags;
} __rte_aligned(32);

#define BATCH_SIZE 64

/**
 * Structure used inside the driver for building up and submitting
 * a batch of operations to the DSA hardware.
 */
struct rte_idxd_desc_batch {
        struct rte_idxd_completion comp; /* the completion record for batch */

        uint16_t submitted;
        uint16_t op_count;
        uint16_t hdl_end;

        struct rte_idxd_hw_desc batch_desc;

        /* batches must always contain at least two descriptors,
         * so put a null descriptor at the start
         */
        struct rte_idxd_hw_desc null_desc;
        struct rte_idxd_hw_desc ops[BATCH_SIZE];
};
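
/*
 * When a batch is submitted, __idxd_perform_ops() sets batch_desc.size to
 * op_count + 1, accounting for the leading null descriptor as well as the
 * ops[] entries filled in so far.
 */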

/**
 * Structure used to save the "handles" provided by the user so they can be
 * returned to the user on job completion.
 */
struct rte_idxd_user_hdl {
        uint64_t src;
        uint64_t dst;
};

/**
 * @internal
 * Structure representing an IDXD device instance
 */
struct rte_idxd_rawdev {
        enum rte_ioat_dev_type type;
        struct rte_ioat_xstats xstats;

        void *portal; /* address to write the batch descriptor */

        /* counters to track the batches and the individual op handles */
        uint16_t batch_ring_sz;  /* size of batch ring */
        uint16_t hdl_ring_sz;    /* size of the user hdl ring */

        uint16_t next_batch;     /* where we write descriptor ops */
        uint16_t next_completed; /* batch where we read completions */
        uint16_t next_ret_hdl;   /* the next user hdl to return */
        uint16_t last_completed_hdl; /* the last user hdl that has completed */
        uint16_t next_free_hdl;  /* where the handle for next op will go */
        uint16_t hdls_disable;   /* disable tracking completion handles */

        struct rte_idxd_user_hdl *hdl_ring;
        struct rte_idxd_desc_batch *batch_ring;
};
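
/*
 * Handle bookkeeping: each batch records in hdl_end where its handles finish
 * in hdl_ring. When a batch is seen to be complete, last_completed_hdl is
 * advanced to that point, and __idxd_completed_ops() returns the handles
 * between next_ret_hdl and last_completed_hdl to the caller.
 */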

/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short read = ioat->next_read;
        unsigned short write = ioat->next_write;
        unsigned short mask = ioat->ring_size - 1;
        /* indexes are free-running, so free slots = (ring_size - 1) - (write - read) */
        unsigned short space = mask + read - write;
        struct rte_ioat_generic_hw_desc *desc;

        if (space == 0) {
                ioat->xstats.enqueue_failed++;
                return 0;
        }

        ioat->next_write = write + 1;
        write &= mask;

        desc = &ioat->desc_ring[write];
        desc->size = length;
        /* request a completion write-back on every 16th descriptor */
        desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
        desc->src_addr = src;
        desc->dest_addr = dst;

        if (!ioat->hdls_disable)
                ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
                                        (int64_t)src_hdl);
        rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

        ioat->xstats.enqueued++;
        return 1;
}

/* add fence to last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short write = ioat->next_write;
        unsigned short mask = ioat->ring_size - 1;
        struct rte_ioat_generic_hw_desc *desc;

        write = (write - 1) & mask;
        desc = &ioat->desc_ring[write];

        desc->u.control.fence = 1;
        return 0;
}

/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline void
__ioat_perform_ops(int dev_id)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
                        .control.completion_update = 1;
        rte_compiler_barrier();
        *ioat->doorbell = ioat->next_write;
        ioat->xstats.started = ioat->xstats.enqueued;
}

/**
 * @internal
 * Returns the index of the last completed operation.
 */
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
        uint64_t status = ioat->status;

        /* lower 3 bits indicate "transfer status": active, idle, halted.
         * We can ignore bit 0.
         */
        *error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
        /* the remaining bits give the address of the last completed descriptor;
         * each descriptor is 64 bytes, so convert the offset into a ring index
         */
        return (status - ioat->ring_addr) >> 6;
}

/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short mask = (ioat->ring_size - 1);
        unsigned short read = ioat->next_read;
        unsigned short end_read, count;
        int error;
        int i = 0;

        end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
        count = (end_read - (read & mask)) & mask;

        if (error) {
                rte_errno = EIO;
                return -1;
        }

        if (ioat->hdls_disable) {
                read += count;
                goto end;
        }

        if (count > max_copies)
                count = max_copies;

        /* copy the completion handles out two at a time using 128-bit loads */
        for (; i < count - 1; i += 2, read += 2) {
                __m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
                __m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

                _mm_storeu_si128((__m128i *)&src_hdls[i],
                                _mm_unpacklo_epi64(hdls0, hdls1));
                _mm_storeu_si128((__m128i *)&dst_hdls[i],
                                _mm_unpackhi_epi64(hdls0, hdls1));
        }
        for (; i < count; i++, read++) {
                uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
                src_hdls[i] = hdls[0];
                dst_hdls[i] = hdls[1];
        }

end:
        ioat->next_read = read;
        ioat->xstats.completed += count;
        return count;
}

static __rte_always_inline int
__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,
                const struct rte_idxd_user_hdl *hdl)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

        /* check for room in the handle ring */
        if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
                goto failed;

        /* check for space in current batch */
        if (b->op_count >= BATCH_SIZE)
                goto failed;

        /* check that we can actually use the current batch */
        if (b->submitted)
                goto failed;

        /* write the descriptor */
        b->ops[b->op_count++] = *desc;

        /* store the completion details */
        if (!idxd->hdls_disable)
                idxd->hdl_ring[idxd->next_free_hdl] = *hdl;
        if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
                idxd->next_free_hdl = 0;

        idxd->xstats.enqueued++;
        return 1;

failed:
        idxd->xstats.enqueue_failed++;
        rte_errno = ENOSPC;
        return 0;
}

static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        const struct rte_idxd_hw_desc desc = {
                        .op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
                                IDXD_FLAG_CACHE_CONTROL,
                        .src = src,
                        .dst = dst,
                        .size = length
        };
        const struct rte_idxd_user_hdl hdl = {
                        .src = src_hdl,
                        .dst = dst_hdl
        };
        return __idxd_write_desc(dev_id, &desc, &hdl);
}

static __rte_always_inline int
__idxd_fence(int dev_id)
{
        static const struct rte_idxd_hw_desc fence = {
                        .op_flags = IDXD_FLAG_FENCE
        };
        static const struct rte_idxd_user_hdl null_hdl;
        return __idxd_write_desc(dev_id, &fence, &null_hdl);
}

/*
 * Raw encoding of the movdir64b instruction (opcode 66 0F 38 F8): copy the
 * 64-byte descriptor at src to dst as a single non-torn store. Used below to
 * write the batch descriptor to the device portal.
 */
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const void *src)
{
        asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
                        :
                        : "a" (dst), "d" (src));
}

static __rte_always_inline void
__idxd_perform_ops(int dev_id)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

        if (b->submitted || b->op_count == 0)
                return;
        b->hdl_end = idxd->next_free_hdl;
        b->comp.status = 0;
        b->submitted = 1;
        b->batch_desc.size = b->op_count + 1;
        __idxd_movdir64b(idxd->portal, &b->batch_desc);

        if (++idxd->next_batch == idxd->batch_ring_sz)
                idxd->next_batch = 0;
        idxd->xstats.started = idxd->xstats.enqueued;
}

static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];
        uint16_t h_idx = idxd->next_ret_hdl;
        int n = 0;

        while (b->submitted && b->comp.status != 0) {
                idxd->last_completed_hdl = b->hdl_end;
                b->submitted = 0;
                b->op_count = 0;
                if (++idxd->next_completed == idxd->batch_ring_sz)
                        idxd->next_completed = 0;
                b = &idxd->batch_ring[idxd->next_completed];
        }

        if (!idxd->hdls_disable)
                for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {
                        src_hdls[n] = idxd->hdl_ring[h_idx].src;
                        dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
                        if (++h_idx == idxd->hdl_ring_sz)
                                h_idx = 0;
                }
        else
                while (h_idx != idxd->last_completed_hdl) {
                        n++;
                        if (++h_idx == idxd->hdl_ring_sz)
                                h_idx = 0;
                }

        idxd->next_ret_hdl = h_idx;

        idxd->xstats.completed += n;
        return n;
}

static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_enqueue_copy(dev_id, src, dst, length,
                                src_hdl, dst_hdl);
        else
                return __ioat_enqueue_copy(dev_id, src, dst, length,
                                src_hdl, dst_hdl);
}

static inline int
rte_ioat_fence(int dev_id)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_fence(dev_id);
        else
                return __ioat_fence(dev_id);
}

static inline void
rte_ioat_perform_ops(int dev_id)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_perform_ops(dev_id);
        else
                return __ioat_perform_ops(dev_id);
}

static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_completed_ops(dev_id, max_copies,
                                src_hdls, dst_hdls);
        else
                return __ioat_completed_ops(dev_id, max_copies,
                                src_hdls, dst_hdls);
}
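
/*
 * Illustrative usage sketch of the copy API above (not a complete program):
 * assumes "dev_id" identifies a configured and started ioat/idxd rawdev, and
 * names such as src_iova, src_mbuf and retry_or_drop() are application-side
 * placeholders.
 *
 *      if (rte_ioat_enqueue_copy(dev_id, src_iova, dst_iova, len,
 *                      (uintptr_t)src_mbuf, (uintptr_t)dst_mbuf) != 1)
 *              retry_or_drop();       // no space; rte_errno may be ENOSPC
 *      rte_ioat_perform_ops(dev_id);  // submit enqueued ops to hardware
 *
 *      // later, poll for completions and recover the handles
 *      uintptr_t srcs[32], dsts[32];
 *      int n = rte_ioat_completed_ops(dev_id, RTE_DIM(srcs), srcs, dsts);
 *      // n < 0 indicates an error (rte_errno == EIO); otherwise n handle
 *      // pairs passed at enqueue time are returned
 */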

static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */