/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_
#include <stdbool.h>
#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>
#include <rte_errno.h>
/**
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable: 1;
			uint32_t src_snoop_disable: 1;
			uint32_t dest_snoop_disable: 1;
			uint32_t completion_update: 1;
			uint32_t fence: 1;
			uint32_t reserved2: 1;
			uint32_t src_page_break: 1;
			uint32_t dest_page_break: 1;
			uint32_t bundle: 1;
			uint32_t dest_dca: 1;
			uint32_t hint: 1;
			uint32_t reserved: 13;
			uint32_t op: 8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t op_specific[4];
};
/**
 * Identify the data path to use.
 * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
 */
enum rte_ioat_dev_type {
	RTE_IOAT_DEV,
	RTE_IDXD_DEV,
};
/**
 * Some statistics for tracking; if fields are added or changed, update the
 * xstats fns accordingly.
 */
struct rte_ioat_xstats {
	uint64_t enqueue_failed;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;
};
/**
 * Structure representing an IOAT device instance
 */
struct rte_ioat_rawdev {
	/* common fields at the top - match those in rte_idxd_rawdev */
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile uint16_t *doorbell __rte_cache_aligned;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */

	unsigned short next_read;
	unsigned short next_write;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;

	/* pointer to the register bar */
	volatile struct rte_ioat_registers *regs;
};
#define RTE_IOAT_CHANSTS_IDLE		0x1
#define RTE_IOAT_CHANSTS_SUSPENDED	0x2
#define RTE_IOAT_CHANSTS_HALTED		0x3
#define RTE_IOAT_CHANSTS_ARMED		0x4
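/* The low bits of the status write-back hold one of the channel status
 * values above; __ioat_get_last_completed() below treats anything other
 * than the active (0x0) or idle states as an error.
 */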
/*
 * Defines used in the data path for interacting with IDXD hardware.
 */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
	idxd_op_nop = 0,
	idxd_op_batch,
	idxd_op_drain,
	idxd_op_memmove,
	idxd_op_fill
};

#define IDXD_FLAG_FENCE			(1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID	(1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION	(1 << 3)
#define IDXD_FLAG_CACHE_CONTROL		(1 << 8)
/**
 * Hardware descriptor used by DSA hardware, for both bursts and
 * for individual operations.
 */
struct rte_idxd_hw_desc {
	uint32_t pasid;
	uint32_t op_flags;
	rte_iova_t completion;

	RTE_STD_C11
	union {
		rte_iova_t src;		/* source address for copy ops etc. */
		rte_iova_t desc_addr;	/* descriptor pointer for batch */
	};
	rte_iova_t dst;

	uint32_t size;		/* length of data for op, or batch size */

	/* 28 bytes of padding here */
} __rte_aligned(64);
/**
 * Completion record structure written back by DSA
 */
struct rte_idxd_completion {
	uint8_t status;
	uint8_t result;
	/* 16-bits pad here */
	uint32_t completed_size;	/* data length, or descriptors for batch */

	rte_iova_t fault_address;
	uint32_t invalid_flags;
} __rte_aligned(32);
#define BATCH_SIZE 64
/**
 * Structure used inside the driver for building up and submitting
 * a batch of operations to the DSA hardware.
 */
struct rte_idxd_desc_batch {
	struct rte_idxd_completion comp; /* the completion record for batch */

	uint16_t submitted;
	uint16_t op_count;
	uint16_t hdl_end;

	struct rte_idxd_hw_desc batch_desc;

	/* batches must always have 2 descriptors, so put a null at the start */
	struct rte_idxd_hw_desc null_desc;
	struct rte_idxd_hw_desc ops[BATCH_SIZE];
};
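/* null_desc and ops[] sit back to back, so a batch descriptor pointing at
 * null_desc covers op_count + 1 contiguous 64-byte descriptors in a single
 * submission (see __idxd_perform_ops() below).
 */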
/**
 * Structure used to save the "handles" provided by the user to be
 * returned to the user on job completion.
 */
struct rte_idxd_user_hdl {
	uint64_t src;
	uint64_t dst;
};
/**
 * Structure representing an IDXD device instance
 */
struct rte_idxd_rawdev {
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	void *portal; /* address to write the batch descriptor */

	/* counters to track the batches and the individual op handles */
	uint16_t batch_ring_sz;		/* size of batch ring */
	uint16_t hdl_ring_sz;		/* size of the user hdl ring */

	uint16_t next_batch;		/* where we write descriptor ops */
	uint16_t next_completed;	/* batch where we read completions */
	uint16_t next_ret_hdl;		/* the next user hdl to return */
	uint16_t last_completed_hdl;	/* the last user hdl that has completed */
	uint16_t next_free_hdl;		/* where the handle for next op will go */
	uint16_t hdls_disable;		/* disable tracking completion handles */

	struct rte_idxd_user_hdl *hdl_ring;
	struct rte_idxd_desc_batch *batch_ring;
};
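/*
 * Note on the IDXD data path below: __idxd_write_desc() appends descriptors
 * to the batch currently being built in batch_ring[next_batch],
 * __idxd_perform_ops() submits that whole batch to the device portal with a
 * single MOVDIR64B write, and __idxd_completed_ops() polls the batch
 * completion records and returns the saved user handles.
 */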
/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
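	/* free slots = ring capacity (ring_size - 1) minus descriptors still
	 * in flight (write - read); the counters are free-running, so the
	 * subtraction is valid modulo 2^16.
	 */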
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	if (space == 0) {
		ioat->xstats.enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back every 16th descriptor */
	desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
				(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->xstats.enqueued++;
	return 1;
}
/* add fence to last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	struct rte_ioat_generic_hw_desc *desc;

	write = (write - 1) & mask;
	desc = &ioat->desc_ring[write];

	desc->u.control.fence = 1;
	return 0;
}
/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline void
__ioat_perform_ops(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
	rte_compiler_barrier();
	*ioat->doorbell = ioat->next_write;
	ioat->xstats.started = ioat->xstats.enqueued;
}
/*
 * Returns the index of the last completed operation.
 */
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status": active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
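	/* the remaining bits hold the bus address of the last completed
	 * descriptor; descriptors are 64 bytes each, so the offset from the
	 * ring base shifted right by 6 gives its ring index.
	 */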
	return (status - ioat->ring_addr) >> 6;
}
/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;
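	/* each handle slot is an __m128i holding the (src_hdl, dst_hdl) pair
	 * stored at enqueue time; the unpacklo/unpackhi below regroup two such
	 * pairs into two source handles and two destination handles per store.
	 */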
	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->xstats.completed += count;
	return count;
}
static __rte_always_inline int
__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,
		const struct rte_idxd_user_hdl *hdl)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	/* check for room in the handle ring */
	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
		goto failed;

	/* check for space in current batch */
	if (b->op_count >= BATCH_SIZE)
		goto failed;

	/* check that we can actually use the current batch */
	if (b->submitted)
		goto failed;

	/* write the descriptor */
	b->ops[b->op_count++] = *desc;

	/* store the completion details */
	if (!idxd->hdls_disable)
		idxd->hdl_ring[idxd->next_free_hdl] = *hdl;
	if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
		idxd->next_free_hdl = 0;

	idxd->xstats.enqueued++;
	return 1;

failed:
	idxd->xstats.enqueue_failed++;
	rte_errno = ENOSPC;
	return 0;
}
static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	const struct rte_idxd_hw_desc desc = {
			.op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
				IDXD_FLAG_CACHE_CONTROL,
			.src = src,
			.dst = dst,
			.size = length
	};
	const struct rte_idxd_user_hdl hdl = {
			.src = src_hdl,
			.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id, &desc, &hdl);
}
static __rte_always_inline int
__idxd_fence(int dev_id)
{
	static const struct rte_idxd_hw_desc fence = {
			.op_flags = IDXD_FLAG_FENCE
	};
	static const struct rte_idxd_user_hdl null_hdl;
	return __idxd_write_desc(dev_id, &fence, &null_hdl);
}
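/* The .byte sequence below encodes the MOVDIR64B instruction (66 0f 38 f8 /r),
 * a 64-byte atomic store used to hand a batch descriptor to the device portal;
 * with ModRM byte 0x02 the "a"/"d" constraints place the destination address
 * in RAX and the source in RDX.
 */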
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const void *src)
{
	asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
			:
			: "a" (dst), "d" (src));
}
static __rte_always_inline void
__idxd_perform_ops(int dev_id)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	if (b->submitted || b->op_count == 0)
		return;
	b->hdl_end = idxd->next_free_hdl;
	b->comp.status = 0;
	b->submitted = 1;
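	/* the submitted batch starts with the null descriptor, so its size is
	 * one more than the number of user operations it carries.
	 */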
	b->batch_desc.size = b->op_count + 1;
	__idxd_movdir64b(idxd->portal, &b->batch_desc);

	if (++idxd->next_batch == idxd->batch_ring_sz)
		idxd->next_batch = 0;
	idxd->xstats.started = idxd->xstats.enqueued;
}
static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];
	uint16_t h_idx = idxd->next_ret_hdl;
	int n = 0;

	while (b->submitted && b->comp.status != 0) {
		idxd->last_completed_hdl = b->hdl_end;
		b->submitted = 0;
		b->op_count = 0;
		if (++idxd->next_completed == idxd->batch_ring_sz)
			idxd->next_completed = 0;
		b = &idxd->batch_ring[idxd->next_completed];
	}

	if (!idxd->hdls_disable)
		for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {
			src_hdls[n] = idxd->hdl_ring[h_idx].src;
			dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}
	else
		while (h_idx != idxd->last_completed_hdl) {
			n++;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}

	idxd->next_ret_hdl = h_idx;

	idxd->xstats.completed += n;
	return n;
}
static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
	else
		return __ioat_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
}
static inline int
rte_ioat_fence(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_fence(dev_id);
	else
		return __ioat_fence(dev_id);
}
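/*
 * Illustrative only: when a later copy consumes the output of an earlier one
 * (here A -> B followed by B -> C; all names are hypothetical), a fence keeps
 * the device from starting the second copy before the first has completed:
 *
 *	rte_ioat_enqueue_copy(dev_id, a_iova, b_iova, len, a_hdl, b_hdl);
 *	rte_ioat_fence(dev_id);
 *	rte_ioat_enqueue_copy(dev_id, b_iova, c_iova, len, b_hdl, c_hdl);
 *	rte_ioat_perform_ops(dev_id);
 */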
static inline void
rte_ioat_perform_ops(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_perform_ops(dev_id);
	else
		return __ioat_perform_ops(dev_id);
}
static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
	else
		return __ioat_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
}
static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }
static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}
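/*
 * Illustrative usage sketch (not part of this header's API); the mbuf arrays,
 * lengths and counts below are hypothetical:
 *
 *	for (i = 0; i < num_bufs; i++)
 *		if (rte_ioat_enqueue_copy(dev_id,
 *				rte_pktmbuf_iova(srcs[i]),
 *				rte_pktmbuf_iova(dsts[i]),
 *				lengths[i],
 *				(uintptr_t)srcs[i], (uintptr_t)dsts[i]) != 1)
 *			break;	/* no space left in the ring/batch */
 *	rte_ioat_perform_ops(dev_id);	/* single doorbell write or batch submit */
 *
 *	do {
 *		n = rte_ioat_completed_ops(dev_id, RTE_DIM(done_src),
 *				(uintptr_t *)done_src, (uintptr_t *)done_dst);
 *	} while (n == 0);	/* n < 0 indicates a device error */
 */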
#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */