raw/ioat: make HW register spec private
[dpdk.git] drivers/raw/ioat/rte_ioat_rawdev_fns.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <errno.h>
#include <stdbool.h>
#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_errno.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>

/**
 * @internal
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
        uint32_t size;
        union {
                uint32_t control_raw;
                struct {
                        uint32_t int_enable: 1;
                        uint32_t src_snoop_disable: 1;
                        uint32_t dest_snoop_disable: 1;
                        uint32_t completion_update: 1;
                        uint32_t fence: 1;
                        uint32_t reserved2: 1;
                        uint32_t src_page_break: 1;
                        uint32_t dest_page_break: 1;
                        uint32_t bundle: 1;
                        uint32_t dest_dca: 1;
                        uint32_t hint: 1;
                        uint32_t reserved: 13;
                        uint32_t op: 8;
                } control;
        } u;
        uint64_t src_addr;
        uint64_t dest_addr;
        uint64_t next;
        uint64_t op_specific[4];
};
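/*
 * Note: this descriptor layout is exactly 64 bytes. The completion status
 * written back by the device encodes the ring address of the last completed
 * descriptor, which rte_ioat_get_last_completed() below converts to a ring
 * index by subtracting the ring base address and shifting right by 6, i.e.
 * dividing by the descriptor size.
 */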

/**
 * @internal
 * Structure representing a device instance
 */
struct rte_ioat_rawdev {
        struct rte_rawdev *rawdev;
        const struct rte_memzone *mz;
        const struct rte_memzone *desc_mz;

        volatile uint16_t *doorbell;
        phys_addr_t status_addr;
        phys_addr_t ring_addr;

        unsigned short ring_size;
        bool hdls_disable;
        struct rte_ioat_generic_hw_desc *desc_ring;
        __m128i *hdls; /* completion handles for returning to user */

        unsigned short next_read;
        unsigned short next_write;

        /* some statistics for tracking, if added/changed update xstats fns */
        uint64_t enqueue_failed __rte_cache_aligned;
        uint64_t enqueued;
        uint64_t started;
        uint64_t completed;

        /* to report completions, the device will write status back here */
        volatile uint64_t status __rte_cache_aligned;

        /* pointer to the register bar */
        volatile struct rte_ioat_registers *regs;
};

#define RTE_IOAT_CHANSTS_IDLE                   0x1
#define RTE_IOAT_CHANSTS_SUSPENDED              0x2
#define RTE_IOAT_CHANSTS_HALTED                 0x3
#define RTE_IOAT_CHANSTS_ARMED                  0x4

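/*
 * Typical usage of the functions below (illustrative sketch only; it assumes
 * the application has already configured and started rawdev "dev_id", that
 * srcs[]/dsts[] hold the physical addresses of the buffers to copy, and that
 * src_bufs[]/dst_bufs[] are the matching user handles to be returned on
 * completion):
 *
 *      for (i = 0; i < nb_copies; i++)
 *              if (rte_ioat_enqueue_copy(dev_id, srcs[i], dsts[i], len,
 *                              (uintptr_t)src_bufs[i],
 *                              (uintptr_t)dst_bufs[i]) != 1)
 *                      break;
 *      rte_ioat_perform_ops(dev_id);
 *
 * The doorbell is only rung by rte_ioat_perform_ops(), so multiple enqueues
 * can be batched behind a single doorbell write.
 */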
/*
 * Enqueue a copy operation onto the ioat device.
 * Returns 1 if the copy was successfully enqueued, or 0 if there was no
 * space in the descriptor ring. The src_hdl/dst_hdl values are opaque
 * handles returned to the caller by rte_ioat_completed_ops() once the
 * copy has completed.
 */
static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short read = ioat->next_read;
        unsigned short write = ioat->next_write;
        unsigned short mask = ioat->ring_size - 1;
        unsigned short space = mask + read - write;
        struct rte_ioat_generic_hw_desc *desc;

        if (space == 0) {
                ioat->enqueue_failed++;
                return 0;
        }

        ioat->next_write = write + 1;
        write &= mask;

        desc = &ioat->desc_ring[write];
        desc->size = length;
        /* set descriptor write-back every 16th descriptor */
        desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
        desc->src_addr = src;
        desc->dest_addr = dst;

        if (!ioat->hdls_disable)
                ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
                                        (int64_t)src_hdl);
        rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

        ioat->enqueued++;
        return 1;
}
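/*
 * Note: the source and destination handles for each descriptor are packed
 * into a single 16-byte __m128i slot so that rte_ioat_completed_ops() can
 * gather pairs of handles with vector loads and unpack them straight into
 * the caller's src/dst arrays.
 */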

/*
 * Add a fence to the last written descriptor, so that operations enqueued
 * after this point do not begin until the previously enqueued operations
 * have completed.
 */
static inline int
rte_ioat_fence(int dev_id)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short write = ioat->next_write;
        unsigned short mask = ioat->ring_size - 1;
        struct rte_ioat_generic_hw_desc *desc;

        write = (write - 1) & mask;
        desc = &ioat->desc_ring[write];

        desc->u.control.fence = 1;
        return 0;
}

/*
 * Trigger hardware to begin performing enqueued operations
 */
static inline void
rte_ioat_perform_ops(int dev_id)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
                        .control.completion_update = 1;
        /* keep the descriptor updates ordered before the doorbell write */
        rte_compiler_barrier();
        *ioat->doorbell = ioat->next_write;
        ioat->started = ioat->enqueued;
}

/**
 * @internal
 * Returns the index of the last completed operation.
 */
static inline int
rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
        uint64_t status = ioat->status;

        /* lower 3 bits indicate "transfer status" : active, idle, halted.
         * We can ignore bit 0.
         */
        *error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
        return (status - ioat->ring_addr) >> 6;
}

/*
 * Returns details of operations that have been completed.
 * On success, returns the number of completed operations and, unless handle
 * tracking is disabled, fills in the src_hdls and dst_hdls arrays. On
 * failure, returns -1 and sets rte_errno to EIO.
 */
static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short mask = (ioat->ring_size - 1);
        unsigned short read = ioat->next_read;
        unsigned short end_read, count;
        int error;
        int i = 0;

        end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
        count = (end_read - (read & mask)) & mask;

        if (error) {
                rte_errno = EIO;
                return -1;
        }

        if (ioat->hdls_disable) {
                read += count;
                goto end;
        }

        if (count > max_copies)
                count = max_copies;

        for (; i < count - 1; i += 2, read += 2) {
                __m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
                __m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

                _mm_storeu_si128((__m128i *)&src_hdls[i],
                                _mm_unpacklo_epi64(hdls0, hdls1));
                _mm_storeu_si128((__m128i *)&dst_hdls[i],
                                _mm_unpackhi_epi64(hdls0, hdls1));
        }
        for (; i < count; i++, read++) {
                uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
                src_hdls[i] = hdls[0];
                dst_hdls[i] = hdls[1];
        }

end:
        ioat->next_read = read;
        ioat->completed += count;
        return count;
}
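/*
 * Illustrative completion handling (sketch only; dev_id is the rawdev id
 * used when enqueuing, and handle_device_error()/release_copy_buffers()
 * stand in for application-defined handling):
 *
 *      uintptr_t src_hdls[32], dst_hdls[32];
 *      int j, n = rte_ioat_completed_ops(dev_id, 32, src_hdls, dst_hdls);
 *
 *      if (n < 0)
 *              handle_device_error(rte_errno);
 *      for (j = 0; j < n; j++)
 *              release_copy_buffers(src_hdls[j], dst_hdls[j]);
 */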

static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */