/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>
/**
 * @internal
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable: 1;
			uint32_t src_snoop_disable: 1;
			uint32_t dest_snoop_disable: 1;
			uint32_t completion_update: 1;
			uint32_t fence: 1;
			uint32_t reserved2: 1;
			uint32_t src_page_break: 1;
			uint32_t dest_page_break: 1;
			uint32_t bundle: 1;
			uint32_t dest_dca: 1;
			uint32_t hint: 1;
			uint32_t reserved: 13;
			uint32_t op: 8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t op_specific[4];
};
/**
 * @internal
 * Identify the data path to use.
 * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
 */
enum rte_ioat_dev_type {
	RTE_IOAT_DEV,
	RTE_IDXD_DEV,
};
/**
 * @internal
 * some statistics for tracking, if added/changed update xstats fns
 */
struct rte_ioat_xstats {
	uint64_t enqueue_failed;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;
};
/**
 * @internal
 * Structure representing an IOAT device instance
 */
struct rte_ioat_rawdev {
	/* common fields at the top - match those in rte_idxd_rawdev */
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile uint16_t *doorbell __rte_cache_aligned;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */

	unsigned short next_read;
	unsigned short next_write;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;

	/* pointer to the register bar */
	volatile struct rte_ioat_registers *regs;
};
#define RTE_IOAT_CHANSTS_IDLE		0x1
#define RTE_IOAT_CHANSTS_SUSPENDED	0x2
#define RTE_IOAT_CHANSTS_HALTED		0x3
#define RTE_IOAT_CHANSTS_ARMED		0x4
/*
 * Defines used in the data path for interacting with hardware.
 */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
	idxd_op_nop = 0,
	idxd_op_batch,
	idxd_op_drain,
	idxd_op_memmove,
	idxd_op_fill
};

#define IDXD_FLAG_FENCE                 (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)
/**
 * Hardware descriptor used by DSA hardware, for both bursts and
 * for individual operations.
 */
struct rte_idxd_hw_desc {
	uint32_t pasid;
	uint32_t op_flags;
	rte_iova_t completion;

	RTE_STD_C11
	union {
		rte_iova_t src;      /* source address for copy ops etc. */
		rte_iova_t desc_addr; /* descriptor pointer for batch */
	};
	rte_iova_t dst;

	uint32_t size;    /* length of data for op, or batch size */

	/* 28 bytes of padding here */
} __rte_aligned(64);
/**
 * Completion record structure written back by DSA
 */
struct rte_idxd_completion {
	uint8_t status;
	uint8_t result;
	/* 16-bits pad here */
	uint32_t completed_size; /* data length, or descriptors for batch */

	rte_iova_t fault_address;
	uint32_t invalid_flags;
} __rte_aligned(32);
#define BATCH_SIZE 64
/**
 * Structure used inside the driver for building up and submitting
 * a batch of operations to the DSA hardware.
 */
struct rte_idxd_desc_batch {
	struct rte_idxd_completion comp; /* the completion record for batch */

	uint16_t submitted;
	uint16_t op_count;
	uint16_t hdl_end;

	struct rte_idxd_hw_desc batch_desc;

	/* batches must always have 2 descriptors, so put a null at the start */
	struct rte_idxd_hw_desc null_desc;
	struct rte_idxd_hw_desc ops[BATCH_SIZE];
};
/**
 * structure used to save the "handles" provided by the user to be
 * returned to the user on job completion.
 */
struct rte_idxd_user_hdl {
	uint64_t src;
	uint64_t dst;
};
/**
 * @internal
 * Structure representing an IDXD device instance
 */
struct rte_idxd_rawdev {
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	void *portal; /* address to write the batch descriptor */

	/* counters to track the batches and the individual op handles */
	uint16_t batch_ring_sz;  /* size of batch ring */
	uint16_t hdl_ring_sz;    /* size of the user hdl ring */

	uint16_t next_batch;     /* where we write descriptor ops */
	uint16_t next_completed; /* batch where we read completions */
	uint16_t next_ret_hdl;   /* the next user hdl to return */
	uint16_t last_completed_hdl; /* the last user hdl that has completed */
	uint16_t next_free_hdl;  /* where the handle for next op will go */
	uint16_t hdls_disable;   /* disable tracking completion handles */

	struct rte_idxd_user_hdl *hdl_ring;
	struct rte_idxd_desc_batch *batch_ring;
};
/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	/* one ring slot is always left unused, so full shows as space == 0 */
	if (space == 0) {
		ioat->xstats.enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back (completion_update, bit 3) every 16th
	 * descriptor
	 */
	desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
					(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->xstats.enqueued++;
	return 1;
}
/* add fence to last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	struct rte_ioat_generic_hw_desc *desc;

	write = (write - 1) & mask;
	desc = &ioat->desc_ring[write];

	desc->u.control.fence = 1;
	return 0;
}
/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline void
__ioat_perform_ops(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
	rte_compiler_barrier();
	*ioat->doorbell = ioat->next_write;
	ioat->xstats.started = ioat->xstats.enqueued;
}
/**
 * @internal
 * Returns the index of the last completed operation.
 */
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status" : active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
	/* status holds the address of the last completed descriptor; since
	 * descriptors are 64 bytes, shifting by 6 converts the ring offset
	 * to a ring index (and discards the status bits above)
	 */
	return (status - ioat->ring_addr) >> 6;
}
/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;

	/* gather the handle pairs two at a time using SSE loads/stores */
	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
	/* copy out any final odd entry */
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->xstats.completed += count;
	return count;
}
/* write a descriptor, and its user handle, into the current batch */
static __rte_always_inline int
__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,
		const struct rte_idxd_user_hdl *hdl)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	/* check for room in the handle ring */
	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
		goto failed;

	/* check for space in current batch */
	if (b->op_count >= BATCH_SIZE)
		goto failed;

	/* check that we can actually use the current batch */
	if (b->submitted)
		goto failed;

	/* write the descriptor */
	b->ops[b->op_count++] = *desc;

	/* store the completion details */
	if (!idxd->hdls_disable)
		idxd->hdl_ring[idxd->next_free_hdl] = *hdl;
	if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
		idxd->next_free_hdl = 0;

	idxd->xstats.enqueued++;
	return 1;

failed:
	idxd->xstats.enqueue_failed++;
	rte_errno = ENOSPC;
	return 0;
}
static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	const struct rte_idxd_hw_desc desc = {
			.op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
				IDXD_FLAG_CACHE_CONTROL,
			.src = src,
			.dst = dst,
			.size = length
	};
	const struct rte_idxd_user_hdl hdl = {
			.src = src_hdl,
			.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id, &desc, &hdl);
}
static __rte_always_inline int
__idxd_fence(int dev_id)
{
	/* only the op_flags field needs filling - zero src, dst and length */
	static const struct rte_idxd_hw_desc fence = {
			.op_flags = IDXD_FLAG_FENCE
	};
	static const struct rte_idxd_user_hdl null_hdl;
	return __idxd_write_desc(dev_id, &fence, &null_hdl);
}
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const void *src)
{
	/* raw encoding of "movdir64b rax, [rdx]" (66 0F 38 F8 /r), for
	 * toolchains without MOVDIR64B support: dst in rax ("a"), src in
	 * rdx ("d")
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
			:
			: "a" (dst), "d" (src));
}
static __rte_always_inline void
__idxd_perform_ops(int dev_id)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	if (b->submitted || b->op_count == 0)
		return;
	b->hdl_end = idxd->next_free_hdl;
	b->comp.status = 0;
	b->submitted = 1;
	/* +1 to account for the leading null descriptor in every batch */
	b->batch_desc.size = b->op_count + 1;
	__idxd_movdir64b(idxd->portal, &b->batch_desc);

	if (++idxd->next_batch == idxd->batch_ring_sz)
		idxd->next_batch = 0;
	idxd->xstats.started = idxd->xstats.enqueued;
}
static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];
	uint16_t h_idx = idxd->next_ret_hdl;
	int n = 0;

	/* retire any batches whose completion record has been written back */
	while (b->submitted && b->comp.status != 0) {
		idxd->last_completed_hdl = b->hdl_end;
		b->submitted = 0;
		b->op_count = 0;
		if (++idxd->next_completed == idxd->batch_ring_sz)
			idxd->next_completed = 0;
		b = &idxd->batch_ring[idxd->next_completed];
	}

	/* either return the handles to the user, or just count them */
	if (!idxd->hdls_disable)
		for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {
			src_hdls[n] = idxd->hdl_ring[h_idx].src;
			dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}
	else
		while (h_idx != idxd->last_completed_hdl) {
			n++;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}

	idxd->next_ret_hdl = h_idx;

	idxd->xstats.completed += n;
	return n;
}
/* enqueue a copy; returns 1 on success, 0 if no space on the ring */
static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
	else
		return __ioat_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
}
/* fence: ops after this will not start until earlier ops have completed */
static inline int
rte_ioat_fence(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_fence(dev_id);
	else
		return __ioat_fence(dev_id);
}
/* trigger hardware to begin performing enqueued operations */
static inline void
rte_ioat_perform_ops(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_perform_ops(dev_id);
	else
		return __ioat_perform_ops(dev_id);
}
/* return completed op handles; -1 on error (rte_errno set), else count */
static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
	else
		return __ioat_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
}
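
/*
 * Illustrative sketch, not part of the original header: a fence forces
 * ordering when one copy depends on another, e.g. when the destination
 * of the first copy is the source of the second. The function name and
 * the addresses a, b, c are hypothetical; the guard keeps the example
 * out of normal builds.
 */
#ifdef RTE_IOAT_RAWDEV_FNS_EXAMPLES
static inline void
example_fenced_copies(int dev_id, phys_addr_t a, phys_addr_t b,
		phys_addr_t c, unsigned int len)
{
	rte_ioat_enqueue_copy(dev_id, a, b, len, 0, 0);
	rte_ioat_fence(dev_id);	/* b must be fully written before re-read */
	rte_ioat_enqueue_copy(dev_id, b, c, len, 0, 0);
	rte_ioat_perform_ops(dev_id);	/* submit both to hardware */
}
#endif /* RTE_IOAT_RAWDEV_FNS_EXAMPLES */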
static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}
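
/*
 * Illustrative sketch, not part of the original header: the expected
 * calling sequence is enqueue -> perform_ops -> poll completed_ops.
 * The function name, BURST_SZ and the address arrays are hypothetical;
 * the guard keeps the example out of normal builds.
 */
#ifdef RTE_IOAT_RAWDEV_FNS_EXAMPLES
#define EXAMPLE_BURST_SZ 32
static inline void
example_copy_burst(int dev_id, phys_addr_t *srcs, phys_addr_t *dsts,
		unsigned int len)
{
	uintptr_t src_hdls[EXAMPLE_BURST_SZ], dst_hdls[EXAMPLE_BURST_SZ];
	unsigned int i;
	int done;

	/* stage up to a burst of copies; 0 return means the ring is full */
	for (i = 0; i < EXAMPLE_BURST_SZ; i++)
		if (rte_ioat_enqueue_copy(dev_id, srcs[i], dsts[i], len,
				(uintptr_t)srcs[i], (uintptr_t)dsts[i]) == 0)
			break;

	/* ring the doorbell once for the whole burst */
	rte_ioat_perform_ops(dev_id);

	/* poll for completions; the handles given at enqueue come back in
	 * order, and a negative return indicates a device error
	 */
	do {
		done = rte_ioat_completed_ops(dev_id, EXAMPLE_BURST_SZ,
				src_hdls, dst_hdls);
	} while (done == 0);
}
#endif /* RTE_IOAT_RAWDEV_FNS_EXAMPLES */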
#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */