/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

/**
 * This header file contains the implementation of the various ioat
 * rawdev functions for IOAT/CBDMA hardware. The API specification and key
 * public structures are defined in "rte_ioat_rawdev.h".
 *
 * This file should not be included directly, but instead applications should
 * include "rte_ioat_rawdev.h", which then includes this file - and the IDXD/DSA
 * equivalent header - in turn.
 */

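/*
 * Illustrative datapath usage (added commentary, not part of the API
 * specification): assuming "dev_id" identifies a configured and started
 * ioat rawdev and that the caller supplies physical/IOVA addresses, the
 * functions defined below are typically used along these lines:
 *
 *	if (rte_ioat_enqueue_copy(dev_id, src_phys, dst_phys, len,
 *			(uintptr_t)src_buf, (uintptr_t)dst_buf) != 1)
 *		// ring full - retry later
 *	rte_ioat_perform_ops(dev_id);
 *	...
 *	n = rte_ioat_completed_ops(dev_id, BURST_SIZE, NULL, NULL,
 *			src_hdls, dst_hdls);
 *
 * Here src_buf/dst_buf, BURST_SIZE and the handle arrays are placeholders
 * chosen by the application.
 */
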
#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>

/*
 * Identify the data path to use.
 * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
 */
enum rte_ioat_dev_type {
	RTE_IOAT_DEV,
	RTE_IDXD_DEV,
};

/*
 * some statistics for tracking, if added/changed update xstats fns
 */
struct rte_ioat_xstats {
	uint64_t enqueue_failed;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;
};

#include "rte_idxd_rawdev_fns.h"

/*
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable: 1;
			uint32_t src_snoop_disable: 1;
			uint32_t dest_snoop_disable: 1;
			uint32_t completion_update: 1;
			uint32_t fence: 1;
			uint32_t reserved2: 1;
			uint32_t src_page_break: 1;
			uint32_t dest_page_break: 1;
			uint32_t bundle: 1;
			uint32_t dest_dca: 1;
			uint32_t hint: 1;
			uint32_t reserved: 13;
			uint32_t op: 8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t op_specific[4];
};

/*
 * Structure representing an IOAT device instance
 */
struct rte_ioat_rawdev {
	/* common fields at the top - match those in rte_idxd_rawdev */
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile uint16_t *doorbell __rte_cache_aligned;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */

	unsigned short next_read;
	unsigned short next_write;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;

	/* pointer to the register bar */
	volatile struct rte_ioat_registers *regs;
};

#define RTE_IOAT_CHANSTS_IDLE		0x1
#define RTE_IOAT_CHANSTS_SUSPENDED	0x2
#define RTE_IOAT_CHANSTS_HALTED		0x3
#define RTE_IOAT_CHANSTS_ARMED		0x4

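/*
 * Note on the ring accounting used by the functions below: one slot of the
 * descriptor ring is always left unused so that a full ring can be
 * distinguished from an empty one, hence the usable capacity of ring_size - 1.
 */
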
static __rte_always_inline uint16_t
__ioat_burst_capacity(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short size = ioat->ring_size - 1;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short space = size - (write - read);

	return space;
}

static __rte_always_inline int
__ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	if (space == 0) {
		ioat->xstats.enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back every 16th descriptor */
	desc->u.control_raw = (uint32_t)((op << IOAT_CMD_OP_SHIFT) |
			(!(write & 0xF) << IOAT_COMP_UPDATE_SHIFT));
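	/*
	 * Requesting the completion write-back on only every 16th slot appears
	 * to be an optimisation to limit how often the device writes its status
	 * back to memory; __ioat_perform_ops() below also sets the flag on the
	 * final descriptor of each burst so completions are still reported
	 * promptly.
	 */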
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
					(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->xstats.enqueued++;
	return 1;
}

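/*
 * Enqueue a fill operation onto the ioat device; the 64-bit fill pattern is
 * carried in the descriptor's source-address field, so the generic descriptor
 * write path above is reused, with a null handle recorded on the source side.
 */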
static __rte_always_inline int
__ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
		unsigned int length, uintptr_t dst_hdl)
{
	static const uintptr_t null_hdl;

	return __ioat_write_desc(dev_id, ioat_op_fill, pattern, dst, length,
			null_hdl, dst_hdl);
}

/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	return __ioat_write_desc(dev_id, ioat_op_copy, src, dst, length,
			src_hdl, dst_hdl);
}

/* add fence to last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	struct rte_ioat_generic_hw_desc *desc;

	write = (write - 1) & mask;
	desc = &ioat->desc_ring[write];

	desc->u.control.fence = 1;
	return 0;
}

/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline int
__ioat_perform_ops(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
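	/*
	 * The compiler barrier ensures the completion_update store above is
	 * emitted before the doorbell write below; presumably no stronger
	 * memory fence is required for this store ordering on the supported
	 * (x86) platforms.
	 */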
	rte_compiler_barrier();
	*ioat->doorbell = ioat->next_write;
	ioat->xstats.started = ioat->xstats.enqueued;

	return 0;
}

/*
 * Returns the index of the last completed operation.
 */
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status" : active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
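	/*
	 * The remaining bits of the status word hold the address of the last
	 * completed descriptor: subtracting the ring's base address and
	 * dividing by the 64-byte descriptor size (>> 6) yields its index
	 * within the ring.
	 */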
	return (status - ioat->ring_addr) >> 6;
}

/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;

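	/*
	 * Each completion handle pair was stored as one 128-bit value with the
	 * source handle in the low 64 bits and the destination handle in the
	 * high 64 bits, so the first loop below gathers two completions per
	 * iteration and splits them with unpacklo/unpackhi into the src_hdls[]
	 * and dst_hdls[] output arrays; the second loop copies any odd final
	 * entry.
	 */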
	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->xstats.completed += count;
	return count;
}

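/*
 * Public datapath functions: each checks the device-type field placed at the
 * start of the driver-private data (see rte_ioat_dev_type above) and
 * dispatches to the __idxd_* implementations for DSA hardware, or to the
 * __ioat_* implementations above for IOAT/CBDMA hardware.
 */
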
static inline uint16_t
rte_ioat_burst_capacity(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_burst_capacity(dev_id);
	else
		return __ioat_burst_capacity(dev_id);
}

static inline int
rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
		unsigned int len, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
	else
		return __ioat_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
}

static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
	else
		return __ioat_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
}

static inline int
rte_ioat_fence(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_fence(dev_id);
	else
		return __ioat_fence(dev_id);
}

static inline int
rte_ioat_perform_ops(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_perform_ops(dev_id);
	else
		return __ioat_perform_ops(dev_id);
}

static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
		uint32_t *status, uint8_t *num_unsuccessful,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	uint8_t tmp; /* used so functions don't need to check for null parameter */

	if (num_unsuccessful == NULL)
		num_unsuccessful = &tmp;

	*num_unsuccessful = 0;
	if (*type == RTE_IDXD_DEV)
		return __idxd_completed_ops(dev_id, max_copies, status, num_unsuccessful,
				src_hdls, dst_hdls);
	else
		return __ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	return rte_ioat_completed_ops(dev_id, max_copies, NULL, NULL,
			src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */