/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h> /* for __m128i and the _mm_* intrinsics used below */
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>

/**
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable: 1;
			uint32_t src_snoop_disable: 1;
			uint32_t dest_snoop_disable: 1;
			uint32_t completion_update: 1;
			uint32_t fence: 1;
			uint32_t reserved2: 1;
			uint32_t src_page_break: 1;
			uint32_t dest_page_break: 1;
			uint32_t bundle: 1;
			uint32_t dest_dca: 1;
			uint32_t hint: 1;
			uint32_t reserved: 13;
			uint32_t op: 8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t op_specific[4];
};

/**
 * Identify the data path to use.
 * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
 */
enum rte_ioat_dev_type {
	RTE_IOAT_DEV,
	RTE_IDXD_DEV,
};

/**
 * Structure representing an IOAT device instance
 */
struct rte_ioat_rawdev {
	enum rte_ioat_dev_type type;
	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile uint16_t *doorbell;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */

	unsigned short next_read;
	unsigned short next_write;

	/* some statistics for tracking, if added/changed update xstats fns */
	uint64_t enqueue_failed __rte_cache_aligned;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;

	/* pointer to the register bar */
	volatile struct rte_ioat_registers *regs;
};

#define RTE_IOAT_CHANSTS_IDLE		0x1
#define RTE_IOAT_CHANSTS_SUSPENDED	0x2
#define RTE_IOAT_CHANSTS_HALTED		0x3
#define RTE_IOAT_CHANSTS_ARMED		0x4
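/*
 * Note: these are the "DMA transfer status" values found in the low bits of
 * the channel status word the device writes back. __ioat_get_last_completed()
 * below masks out SUSPENDED/HALTED/ARMED to flag an error condition, while
 * active/IDLE are treated as normal operation.
 */
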
/*
 * Defines used in the data path for interacting with hardware.
 */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
	idxd_op_nop = 0,
	idxd_op_batch,
	idxd_op_drain,
	idxd_op_memmove,
	idxd_op_fill
};

#define IDXD_FLAG_FENCE                 (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)
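/*
 * Example (sketch): a descriptor's op_flags word combines the operation code
 * in its top byte with the flag bits above, e.g. the cache-allocating memmove
 * built by __idxd_enqueue_copy() below uses:
 *
 *	uint32_t op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
 *			IDXD_FLAG_CACHE_CONTROL;
 */
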
/**
 * Hardware descriptor used by DSA hardware, for both bursts and
 * for individual operations.
 */
struct rte_idxd_hw_desc {
	uint32_t pasid;
	uint32_t op_flags;
	rte_iova_t completion;

	RTE_STD_C11
	union {
		rte_iova_t src;      /* source address for copy ops etc. */
		rte_iova_t desc_addr; /* descriptor pointer for batch */
	};
	rte_iova_t dst;

	uint32_t size;    /* length of data for op, or batch size */

	/* 28 bytes of padding here */
} __rte_aligned(64);

/**
 * Completion record structure written back by DSA
 */
struct rte_idxd_completion {
	uint8_t status;
	uint8_t result;
	/* 16-bits pad here */
	uint32_t completed_size; /* data length, or descriptors for batch */

	rte_iova_t fault_address;
	uint32_t invalid_flags;
} __rte_aligned(32);

#define BATCH_SIZE 64

/**
 * Structure used inside the driver for building up and submitting
 * a batch of operations to the DSA hardware.
 */
struct rte_idxd_desc_batch {
	struct rte_idxd_completion comp; /* the completion record for batch */

	uint16_t submitted;
	uint16_t op_count;
	uint16_t hdl_end;

	struct rte_idxd_hw_desc batch_desc;

	/* batches must always have 2 descriptors, so put a null at the start */
	struct rte_idxd_hw_desc null_desc;
	struct rte_idxd_hw_desc ops[BATCH_SIZE];
};
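/*
 * Sketch of the batch lifecycle implemented below: __idxd_write_desc() appends
 * descriptors to ops[] of the current, not-yet-submitted batch and records the
 * user handles; __idxd_perform_ops() then fills in batch_desc.size and writes
 * the batch descriptor to the device portal with MOVDIR64B; finally
 * __idxd_completed_ops() polls comp.status to detect completion and hands the
 * saved handles back to the caller.
 */
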
/**
 * structure used to save the "handles" provided by the user to be
 * returned to the user on job completion.
 */
struct rte_idxd_user_hdl {
	uint64_t src;
	uint64_t dst;
};

/**
 * Structure representing an IDXD device instance
 */
struct rte_idxd_rawdev {
	enum rte_ioat_dev_type type;
	void *portal; /* address to write the batch descriptor */

	/* counters to track the batches and the individual op handles */
	uint16_t batch_ring_sz;  /* size of batch ring */
	uint16_t hdl_ring_sz;    /* size of the user hdl ring */

	uint16_t next_batch;     /* where we write descriptor ops */
	uint16_t next_completed; /* batch where we read completions */
	uint16_t next_ret_hdl;   /* the next user hdl to return */
	uint16_t last_completed_hdl; /* the last user hdl that has completed */
	uint16_t next_free_hdl;  /* where the handle for next op will go */
	uint16_t hdls_disable;   /* disable tracking completion handles */

	struct rte_idxd_user_hdl *hdl_ring;
	struct rte_idxd_desc_batch *batch_ring;
};

/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	if (space == 0) {
		ioat->enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back every 16th descriptor */
	desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
				(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->enqueued++;
	return 1;
}
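/*
 * Notes on the arithmetic above: ring_size is a power of two, so
 * "mask + read - write" leaves one slot unused and wraps correctly on
 * unsigned-short overflow; "(!(write & 0xF)) << 3" sets the completion_update
 * bit (bit 3 of control_raw) on every 16th descriptor only, so the device
 * writes status back periodically rather than after each descriptor.
 */
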
/* add fence to last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	struct rte_ioat_generic_hw_desc *desc;

	write = (write - 1) & mask;
	desc = &ioat->desc_ring[write];

	desc->u.control.fence = 1;
	return 0;
}

/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline void
__ioat_perform_ops(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
	rte_compiler_barrier();
	*ioat->doorbell = ioat->next_write;
	ioat->started = ioat->enqueued;
}

/*
 * Returns the index of the last completed operation.
 */
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status" : active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
	return (status - ioat->ring_addr) >> 6;
}
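/*
 * The hardware writes the address of the most recently completed descriptor
 * into the upper bits of the status word; since descriptors are 64 bytes and
 * the ring is contiguous, subtracting ring_addr and shifting right by 6
 * recovers that descriptor's index within the ring.
 */
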
/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;

	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->completed += count;
	return count;
}

static __rte_always_inline int
__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,
		const struct rte_idxd_user_hdl *hdl)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	/* check for room in the handle ring */
	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
		goto failed;

	/* check for space in current batch */
	if (b->op_count >= BATCH_SIZE)
		goto failed;

	/* check that we can actually use the current batch */
	if (b->submitted)
		goto failed;

	/* write the descriptor */
	b->ops[b->op_count++] = *desc;

	/* store the completion details */
	if (!idxd->hdls_disable)
		idxd->hdl_ring[idxd->next_free_hdl] = *hdl;
	if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
		idxd->next_free_hdl = 0;

	return 1;

failed:
	rte_errno = ENOSPC;
	return 0;
}

static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	const struct rte_idxd_hw_desc desc = {
			.op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
				IDXD_FLAG_CACHE_CONTROL,
			.src = src,
			.dst = dst,
			.size = length
	};
	const struct rte_idxd_user_hdl hdl = {
			.src = src_hdl,
			.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id, &desc, &hdl);
}

static __rte_always_inline int
__idxd_fence(int dev_id)
{
	static const struct rte_idxd_hw_desc fence = {
			.op_flags = IDXD_FLAG_FENCE
	};
	static const struct rte_idxd_user_hdl null_hdl;
	return __idxd_write_desc(dev_id, &fence, &null_hdl);
}

static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const void *src)
{
	asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
			:
			: "a" (dst), "d" (src));
}
static __rte_always_inline void
__idxd_perform_ops(int dev_id)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	if (b->submitted || b->op_count == 0)
		return;
	b->hdl_end = idxd->next_free_hdl;
	b->comp.status = 0;
	b->submitted = 1;
	b->batch_desc.size = b->op_count + 1;
	__idxd_movdir64b(idxd->portal, &b->batch_desc);

	if (++idxd->next_batch == idxd->batch_ring_sz)
		idxd->next_batch = 0;
}
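/*
 * Note: batch_desc.size is op_count + 1 because the batch always carries the
 * leading null descriptor declared in struct rte_idxd_desc_batch, preserving
 * the hardware requirement of at least two descriptors per batch.
 */
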
static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];
	uint16_t h_idx = idxd->next_ret_hdl;
	int n = 0;

	while (b->submitted && b->comp.status != 0) {
		idxd->last_completed_hdl = b->hdl_end;
		b->submitted = 0;
		b->op_count = 0;
		if (++idxd->next_completed == idxd->batch_ring_sz)
			idxd->next_completed = 0;
		b = &idxd->batch_ring[idxd->next_completed];
	}

	/* return the (src, dst) handles saved for each completed op */
	if (!idxd->hdls_disable)
		for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {
			src_hdls[n] = idxd->hdl_ring[h_idx].src;
			dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}
	else
		while (h_idx != idxd->last_completed_hdl) {
			n++;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}

	idxd->next_ret_hdl = h_idx;

	return n;
}

static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
	else
		return __ioat_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
}

static inline int
rte_ioat_fence(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_fence(dev_id);
	else
		return __ioat_fence(dev_id);
}

static inline void
rte_ioat_perform_ops(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_perform_ops(dev_id);
	else
		return __ioat_perform_ops(dev_id);
}

static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
	else
		return __ioat_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
}
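/*
 * Minimal usage sketch for the copy API above. Illustrative only: "dev_id",
 * the iova/handle arrays, NUM_BUFS and MAX_BURST are hypothetical
 * placeholders, and error handling is omitted. Copies are queued with
 * rte_ioat_enqueue_copy(), submitted to hardware by rte_ioat_perform_ops(),
 * and later reaped (with the original handles returned) via
 * rte_ioat_completed_ops().
 *
 *	for (i = 0; i < NUM_BUFS; i++)
 *		rte_ioat_enqueue_copy(dev_id, src_iova[i], dst_iova[i], buf_len,
 *				(uintptr_t)src_bufs[i], (uintptr_t)dst_bufs[i]);
 *	rte_ioat_perform_ops(dev_id);
 *	...
 *	nb_done = rte_ioat_completed_ops(dev_id, MAX_BURST,
 *			completed_src, completed_dst);
 */
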
static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */