/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>

/**
 * @internal
 * Structure representing a device descriptor
 */
struct rte_ioat_generic_hw_desc {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable: 1;
			uint32_t src_snoop_disable: 1;
			uint32_t dest_snoop_disable: 1;
			uint32_t completion_update: 1;
			uint32_t fence: 1;
			uint32_t reserved2: 1;
			uint32_t src_page_break: 1;
			uint32_t dest_page_break: 1;
			uint32_t bundle: 1;
			uint32_t dest_dca: 1;
			uint32_t hint: 1;
			uint32_t reserved: 13;
			uint32_t op: 8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t op_specific[4];
};
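
/*
 * Illustrative check, not in the driver itself: the control bitfields
 * above are expected to pack into exactly the 32 bits of control_raw,
 * since rte_ioat_enqueue_copy() below writes the raw word directly.
 * Assumes a C11 toolchain for _Static_assert.
 */
_Static_assert(sizeof(((struct rte_ioat_generic_hw_desc *)0)->u) ==
		sizeof(uint32_t),
		"control bitfields must overlay control_raw exactly");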

/**
 * @internal
 * Structure representing a device instance
 */
struct rte_ioat_rawdev {
	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile uint16_t *doorbell;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */

	unsigned short next_read;
	unsigned short next_write;

	/* some statistics for tracking, if added/changed update xstats fns */
	uint64_t enqueue_failed __rte_cache_aligned;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;

	/* pointer to the register bar */
	volatile struct rte_ioat_registers *regs;
};

#define RTE_IOAT_CHANSTS_IDLE		0x1
#define RTE_IOAT_CHANSTS_SUSPENDED	0x2
#define RTE_IOAT_CHANSTS_HALTED		0x3
#define RTE_IOAT_CHANSTS_ARMED		0x4
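
/*
 * Illustrative sketch, not part of the driver API: the CHANSTS values
 * above live in the low 3 bits of the channel status word, the same
 * word sampled by rte_ioat_get_last_completed() below. A hypothetical
 * helper to pull them out:
 */
#define IOAT_EXAMPLE_CHANSTS_MASK 0x7ULL

static inline uint64_t
ioat_example_chansts(uint64_t status_word)
{
	/* yields one of the RTE_IOAT_CHANSTS_* values, or 0 for active */
	return status_word & IOAT_EXAMPLE_CHANSTS_MASK;
}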

/*
 * Enqueue a copy operation onto the ioat device
 */
static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	if (space == 0) {
		ioat->enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back every 16th descriptor */
	desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
				(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->enqueued++;
	return 1;
}
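
/*
 * Illustrative sketch, not part of the driver API: a typical enqueue
 * wrapper that reuses the buffers' physical addresses as the opaque
 * completion handles (the driver never interprets the handles). The
 * "ioat_example_" prefix marks this as a hypothetical helper.
 */
static inline int
ioat_example_enqueue(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length)
{
	/* returns 1 on success, 0 if the descriptor ring was full */
	return rte_ioat_enqueue_copy(dev_id, src, dst, length,
			(uintptr_t)src, (uintptr_t)dst);
}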

/* add fence to last written descriptor */
static inline int
rte_ioat_fence(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	struct rte_ioat_generic_hw_desc *desc;

	write = (write - 1) & mask;
	desc = &ioat->desc_ring[write];

	desc->u.control.fence = 1;
	return 0;
}
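
/*
 * Illustrative sketch, not part of the driver API: two copies where
 * the second reads what the first wrote, so a fence is placed between
 * them to stop the device reordering the pair. "mid" is a hypothetical
 * intermediate (bounce) buffer address.
 */
static inline int
ioat_example_ordered_pair(int dev_id, phys_addr_t src, phys_addr_t mid,
		phys_addr_t dst, unsigned int length)
{
	if (rte_ioat_enqueue_copy(dev_id, src, mid, length,
			(uintptr_t)src, (uintptr_t)mid) != 1)
		return 0;
	rte_ioat_fence(dev_id); /* order the second copy after the first */
	if (rte_ioat_enqueue_copy(dev_id, mid, dst, length,
			(uintptr_t)mid, (uintptr_t)dst) != 1)
		return 1;
	return 2; /* number of operations enqueued */
}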

/*
 * Trigger hardware to begin performing enqueued operations
 */
static inline void
rte_ioat_perform_ops(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
	rte_compiler_barrier();
	*ioat->doorbell = ioat->next_write;
	ioat->started = ioat->enqueued;
}
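
/*
 * Illustrative sketch, not part of the driver API: enqueue a burst of
 * copies and ring the doorbell once at the end, which is the intended
 * usage pattern (one MMIO write per batch rather than per operation).
 * All array parameters are hypothetical caller-owned buffers.
 */
static inline int
ioat_example_copy_burst(int dev_id, const phys_addr_t *srcs,
		const phys_addr_t *dsts, const unsigned int *lengths,
		unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (rte_ioat_enqueue_copy(dev_id, srcs[i], dsts[i],
				lengths[i], (uintptr_t)srcs[i],
				(uintptr_t)dsts[i]) != 1)
			break; /* ring full; submit what was accepted */

	rte_ioat_perform_ops(dev_id); /* single doorbell write */
	return i;
}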

/**
 * @internal
 * Returns the index of the last completed operation.
 */
static inline int
rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status": active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
	return (status - ioat->ring_addr) >> 6;
}

/*
 * Returns details of operations that have been completed
 */
static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;

	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->completed += count;
	return count;
}
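
/*
 * Illustrative sketch, not part of the driver API: poll until at least
 * "n" operations have completed, collecting their handles in batches.
 * Assumes completion handles were not disabled at configure time;
 * returns -1 if the channel reports an error.
 */
static inline int
ioat_example_wait_completions(int dev_id, unsigned int n)
{
	uintptr_t src_hdls[32], dst_hdls[32];
	unsigned int done = 0;

	while (done < n) {
		int ret = rte_ioat_completed_ops(dev_id, RTE_DIM(src_hdls),
				src_hdls, dst_hdls);
		if (ret < 0)
			return -1; /* rte_errno is set to EIO */
		done += ret;
		/* handles in src_hdls/dst_hdls[0..ret-1] could be freed here */
	}
	return done;
}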

static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */