raw/ioat: probe idxd PCI
[dpdk.git] drivers/raw/ioat/rte_ioat_rawdev_fns.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>

/**
 * @internal
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable: 1;
			uint32_t src_snoop_disable: 1;
			uint32_t dest_snoop_disable: 1;
			uint32_t completion_update: 1;
			uint32_t fence: 1;
			uint32_t reserved2: 1;
			uint32_t src_page_break: 1;
			uint32_t dest_page_break: 1;
			uint32_t bundle: 1;
			uint32_t dest_dca: 1;
			uint32_t hint: 1;
			uint32_t reserved: 13;
			uint32_t op: 8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t op_specific[4];
};

/**
 * @internal
 * Identify the data path to use.
 * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
 */
enum rte_ioat_dev_type {
	RTE_IOAT_DEV,
	RTE_IDXD_DEV,
};

/**
 * @internal
 * Structure representing an IOAT device instance
 */
struct rte_ioat_rawdev {
	enum rte_ioat_dev_type type;
	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile uint16_t *doorbell;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */


	unsigned short next_read;
	unsigned short next_write;

	/* some statistics for tracking, if added/changed update xstats fns */
	uint64_t enqueue_failed __rte_cache_aligned;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;

	/* pointer to the register bar */
	volatile struct rte_ioat_registers *regs;
};

#define RTE_IOAT_CHANSTS_IDLE			0x1
#define RTE_IOAT_CHANSTS_SUSPENDED		0x2
#define RTE_IOAT_CHANSTS_HALTED			0x3
#define RTE_IOAT_CHANSTS_ARMED			0x4

/**
 * @internal
 * Structure representing an IDXD device instance
 */
struct rte_idxd_rawdev {
	enum rte_ioat_dev_type type;
	void *portal; /* address to write the batch descriptor */

	/* counters to track the batches and the individual op handles */
	uint16_t batch_ring_sz;  /* size of batch ring */
	uint16_t hdl_ring_sz;    /* size of the user hdl ring */

	uint16_t next_batch;     /* where we write descriptor ops */
	uint16_t next_completed; /* batch where we read completions */
	uint16_t next_ret_hdl;   /* the next user hdl to return */
	uint16_t last_completed_hdl; /* the last user hdl that has completed */
	uint16_t next_free_hdl;  /* where the handle for next op will go */

	struct rte_idxd_user_hdl *hdl_ring;
	struct rte_idxd_desc_batch *batch_ring;
};

/*
 * Enqueue a copy operation onto the ioat device
 */
static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	if (space == 0) {
		ioat->enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back every 16th descriptor */
	desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
					(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->enqueued++;
	return 1;
}
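
/*
 * Usage sketch (illustrative, not part of this header): enqueue a copy
 * between two mbufs, passing the mbuf pointers back as completion handles.
 * The dev_id, src_buf and dst_buf variables are placeholders, and the
 * device is assumed to have been configured via the rawdev API beforehand.
 *
 *	if (rte_ioat_enqueue_copy(dev_id,
 *			rte_pktmbuf_iova(src_buf), rte_pktmbuf_iova(dst_buf),
 *			src_buf->data_len,
 *			(uintptr_t)src_buf, (uintptr_t)dst_buf) != 1) {
 *		// ring full: submit what is pending, then retry later
 *		rte_ioat_perform_ops(dev_id);
 *	}
 */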

/* add fence to last written descriptor */
static inline int
rte_ioat_fence(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	struct rte_ioat_generic_hw_desc *desc;

	write = (write - 1) & mask;
	desc = &ioat->desc_ring[write];

	desc->u.control.fence = 1;
	return 0;
}
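
/*
 * Usage sketch (illustrative): when a later operation depends on the result
 * of an earlier one in the same batch, calling rte_ioat_fence() after the
 * first enqueue marks that descriptor so the following one is not started
 * until it has completed. All variables below are placeholders.
 *
 *	rte_ioat_enqueue_copy(dev_id, src_a, dst_a, len, hdl_a, hdl_a);
 *	rte_ioat_fence(dev_id);   // second copy reads from dst_a
 *	rte_ioat_enqueue_copy(dev_id, dst_a, dst_b, len, hdl_b, hdl_b);
 *	rte_ioat_perform_ops(dev_id);
 */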

/*
 * Trigger hardware to begin performing enqueued operations
 */
static inline void
rte_ioat_perform_ops(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
	rte_compiler_barrier();
	*ioat->doorbell = ioat->next_write;
	ioat->started = ioat->enqueued;
}
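
/*
 * Usage sketch (illustrative): descriptors become visible to hardware only
 * when the doorbell is rung, so a burst can be enqueued and then submitted
 * with a single call. burst_size and the srcs/dsts/lens/hdls arrays are
 * placeholders.
 *
 *	for (i = 0; i < burst_size; i++)
 *		rte_ioat_enqueue_copy(dev_id, srcs[i], dsts[i], lens[i],
 *				hdls[i], hdls[i]);
 *	rte_ioat_perform_ops(dev_id);	// one doorbell write for the burst
 */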

/**
 * @internal
 * Returns the index of the last completed operation.
 */
static inline int
rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status": active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
	return (status - ioat->ring_addr) >> 6;
}

/*
 * Returns details of operations that have been completed
 */
static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;

	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->completed += count;
	return count;
}
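
/*
 * Usage sketch (illustrative): poll for completed copies and recover the
 * handles supplied at enqueue time. Freeing mbufs here assumes the handles
 * were mbuf pointers, as in the enqueue sketch above; handle_error() is a
 * hypothetical error handler.
 *
 *	uintptr_t src_hdls[32], dst_hdls[32];
 *	int i, n = rte_ioat_completed_ops(dev_id, 32, src_hdls, dst_hdls);
 *	if (n < 0)
 *		handle_error(rte_errno);	// rte_errno == EIO on device error
 *	for (i = 0; i < n; i++)
 *		rte_pktmbuf_free((struct rte_mbuf *)src_hdls[i]);
 */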

static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */