/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <stdbool.h>
#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_errno.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>
#include "rte_ioat_spec.h"

/**
 * @internal
 * Structure representing a device instance
 */
struct rte_ioat_rawdev {
	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile struct rte_ioat_registers *regs;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */

	unsigned short next_read;
	unsigned short next_write;

	/* some statistics for tracking, if added/changed update xstats fns */
	uint64_t enqueue_failed __rte_cache_aligned;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;
};
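
/*
 * Note on the ring accounting used below: next_read and next_write are
 * free-running counters and ring_size is a power of two, so with
 * mask = ring_size - 1 the free-slot count is space = mask + read - write,
 * which deliberately keeps one slot unused so a full ring can be told
 * apart from an empty one. A worked example (values are illustrative,
 * not taken from the driver):
 *
 *	ring_size = 8, mask = 7
 *	read = 2, write = 5  ->  space = 7 + 2 - 5 = 4 free slots
 *	read = 2, write = 9  ->  space = 7 + 2 - 9 = 0, ring full
 */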

/**
 * Enqueue a copy operation onto the ioat device
 */
static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
		int fence)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	if (space == 0) {
		ioat->enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back every 16th descriptor */
	desc->u.control_raw = (uint32_t)((!!fence << 4) | (!(write & 0xF)) << 3);
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
					(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->enqueued++;
	return 1;
}
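
/*
 * Illustration of the control word set above (indexes are illustrative,
 * not driver output): with write == 32 and fence == 0, (write & 0xF) == 0,
 * so bit 3 (completion update) is set and the device writes status back
 * for this descriptor; for write == 33..47 the bit stays clear. A
 * non-zero fence sets bit 4, preventing later descriptors from starting
 * until this copy has completed.
 */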

/**
 * Trigger hardware to begin performing enqueued copy operations
 */
static inline void
rte_ioat_do_copies(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
	rte_compiler_barrier();
	ioat->regs->dmacount = ioat->next_write;
	ioat->started = ioat->enqueued;
}
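
/*
 * A minimal usage sketch for the enqueue/doorbell pair above, assuming
 * hypothetical names: dev_id is a configured ioat rawdev, src_iova[] and
 * dst_iova[] hold IOVAs (e.g. from rte_malloc_virt2iova()), and NUM_BUFS
 * and COPY_LEN are application-defined.
 *
 *	unsigned int i;
 *	for (i = 0; i < NUM_BUFS; i++)
 *		if (rte_ioat_enqueue_copy(dev_id, src_iova[i], dst_iova[i],
 *				COPY_LEN, (uintptr_t)src[i],
 *				(uintptr_t)dst[i], 0) != 1)
 *			break; // ring full, submit what was enqueued
 *	rte_ioat_do_copies(dev_id); // one doorbell write for the batch
 */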

/**
 * @internal
 * Returns the index of the last completed operation.
 */
static inline int
rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status" : active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
	return (status - ioat->ring_addr) >> 6;
}
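
/*
 * The shift by 6 above assumes each hardware descriptor is 64 bytes
 * (sizeof(struct rte_ioat_generic_hw_desc)): the device writes the
 * physical address of the last completed descriptor into the status
 * word, so subtracting the ring base and shifting right by 6 yields its
 * ring index. For example, status == ring_addr + 0x80 means index 2.
 */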

/**
 * Returns details of copy operations that have been completed
 */
static inline int
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;

	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
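	/*
	 * Note: each hdls[] entry was packed by rte_ioat_enqueue_copy() as
	 * (src_hdl in the low 64-bit lane, dst_hdl in the high lane), so
	 * unpacklo above gathers two source handles and unpackhi two
	 * destination handles per iteration, storing them two at a time.
	 */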
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->completed += count;
	return count;
}
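
/*
 * A minimal completion-polling sketch for the function above. MAX_BURST
 * and process_copy_done() are hypothetical; the handles returned are the
 * opaque src_hdl/dst_hdl values supplied at enqueue time.
 *
 *	uintptr_t src_hdls[MAX_BURST], dst_hdls[MAX_BURST];
 *	int i, n = rte_ioat_completed_copies(dev_id, MAX_BURST,
 *			src_hdls, dst_hdls);
 *	if (n < 0)
 *		rte_panic("ioat device error: %d\n", rte_errno);
 *	for (i = 0; i < n; i++)
 *		process_copy_done(src_hdls[i], dst_hdls[i]);
 */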

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */