1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
5 #ifndef _RTE_IOAT_RAWDEV_H_
6 #define _RTE_IOAT_RAWDEV_H_
9 * @file rte_ioat_rawdev.h
11 * Definitions for using the ioat rawdev device driver
14 * @b EXPERIMENTAL: these structures and APIs may change without prior notice
17 #include <x86intrin.h>
18 #include <rte_atomic.h>
19 #include <rte_memory.h>
20 #include <rte_memzone.h>
21 #include <rte_prefetch.h>
22 #include "rte_ioat_spec.h"
24 /** Name of the device driver */
25 #define IOAT_PMD_RAWDEV_NAME rawdev_ioat
26 /** String reported as the device driver name by rte_rawdev_info_get() */
27 #define IOAT_PMD_RAWDEV_NAME_STR "rawdev_ioat"
28 /** Name used to adjust the log level for this driver */
29 #define IOAT_PMD_LOG_NAME "rawdev.ioat"
32 * Configuration structure for an ioat rawdev instance
34 * This structure is to be passed as the ".dev_private" parameter when
35 * calling the rte_rawdev_get_info() and rte_rawdev_configure() APIs on
36 * an ioat rawdev instance.
38 struct rte_ioat_rawdev_config {
	/* Number of descriptors in the job-submission ring. The inline
	 * functions below use (ring_size - 1) as an index mask, so this
	 * must be a power of two. */
39 unsigned short ring_size;
44 * Structure representing a device instance
46 struct rte_ioat_rawdev {
	/* back-pointer to the generic rawdev this instance belongs to */
47 struct rte_rawdev *rawdev;
	/* memzones backing the device-private data and the descriptor ring */
48 const struct rte_memzone *mz;
49 const struct rte_memzone *desc_mz;
	/* memory-mapped device registers; volatile because the hardware
	 * reads/writes them asynchronously */
51 volatile struct rte_ioat_registers *regs;
	/* physical addresses given to the hardware: where it writes back
	 * completion status, and the base of the descriptor ring */
52 phys_addr_t status_addr;
53 phys_addr_t ring_addr;
	/* ring_size must be a power of two -- (ring_size - 1) is used as an
	 * index mask by the enqueue/completion inline functions below */
55 unsigned short ring_size;
56 struct rte_ioat_generic_hw_desc *desc_ring;
57 __m128i *hdls; /* completion handles for returning to user */
	/* free-running read/write positions; masked with (ring_size - 1)
	 * on every access to the ring */
60 unsigned short next_read;
61 unsigned short next_write;
63 /* some statistics for tracking, if added/changed update xstats fns*/
64 uint64_t enqueue_failed __rte_cache_aligned;
	/* NOTE(review): fields "enqueued", "started" and "completed",
	 * referenced by the inline functions below, are not visible in this
	 * excerpt -- confirm against the full header. */
69 /* to report completions, the device will write status back here */
70 volatile uint64_t status __rte_cache_aligned;
74 * Enqueue a copy operation onto the ioat device
76 * This queues up a copy operation to be performed by hardware, but does not
77 * trigger hardware to begin that operation.
80 * The rawdev device id of the ioat instance
82 * The physical address of the source buffer
84 * The physical address of the destination buffer
86 * The length of the data to be copied
88 * An opaque handle for the source data, to be returned when this operation
89 * has been completed and the user polls for the completion details
91 * An opaque handle for the destination data, to be returned when this
92 * operation has been completed and the user polls for the completion details
94 * A flag parameter indicating that hardware should not begin to perform any
95 * subsequently enqueued copy operations until after this operation has
98 * Number of operations enqueued, either 0 or 1
/*
 * Queue one copy descriptor on the device; does NOT start the hardware
 * (the caller rings the doorbell later via rte_ioat_do_copies()).
 * NOTE(review): the "int fence" parameter, the ring-full guard around
 * enqueue_failed++, the desc->size assignment and the return statements
 * are missing from this excerpt -- confirm against the full header.
 */
101 rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
102 		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
	/* look up the per-device private state from the global rawdev array */
105 	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
106 	unsigned short read = ioat->next_read;
107 	unsigned short write = ioat->next_write;
108 	unsigned short mask = ioat->ring_size - 1;
	/* free slots left in the ring; relies on ring_size being a power of
	 * two and on unsigned wrap-around of the free-running counters */
109 	unsigned short space = mask + read - write;
110 	struct rte_ioat_generic_hw_desc *desc;
	/* ring full -- count the failure (guard condition not visible here) */
113 	ioat->enqueue_failed++;
	/* claim the slot; the counter is only masked when indexing */
117 	ioat->next_write = write + 1;
120 	desc = &ioat->desc_ring[write];
122 /* set descriptor write-back every 16th descriptor */
	/* bit 4 = fence flag, bit 3 = completion write-back (set when the
	 * slot index is a multiple of 16, limiting status writes) */
123 	desc->u.control_raw = (uint32_t)((!!fence << 4) | (!(write & 0xF)) << 3);
124 	desc->src_addr = src;
125 	desc->dest_addr = dst;
	/* stash the opaque handles: src in the low 64 bits, dst in the high
	 * 64 bits, for return from rte_ioat_completed_copies() */
127 	ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl, (int64_t)src_hdl);
	/* warm the cache line of the slot the next enqueue will use */
128 	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);
135 * Trigger hardware to begin performing enqueued copy operations
137 * This API is used to write the "doorbell" to the hardware to trigger it
138 * to begin the copy operations previously enqueued by rte_ioat_enqueue_copy()
141 * The rawdev device id of the ioat instance
/*
 * Ring the doorbell: tell the hardware to begin executing every
 * descriptor enqueued so far by rte_ioat_enqueue_copy().
 */
144 rte_ioat_do_copies(int dev_id)
146 	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
	/* request a status write-back on the last descriptor submitted, so
	 * ioat->status is refreshed when this batch finishes */
147 	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
148 			.control.completion_update = 1;
	/* make sure the descriptor update above is not reordered after the
	 * doorbell write below */
149 	rte_compiler_barrier();
	/* doorbell: publish the new descriptor count to the hardware */
150 	ioat->regs->dmacount = ioat->next_write;
	/* stats: everything enqueued so far is now started */
151 	ioat->started = ioat->enqueued;
156 * Returns the index of the last completed operation.
/*
 * Return the ring index of the last operation the hardware has
 * completed, and report via *error whether the channel has stopped.
 */
159 rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
	/* snapshot of the status word the device writes back */
161 	uint64_t status = ioat->status;
163 /* lower 3 bits indicate "transfer status" : active, idle, halted.
164 * We can ignore bit 0.
	/* non-zero if the channel is suspended or halted -- caller treats
	 * this as an error condition */
166 	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
	/* the rest of the status word is the address of the last completed
	 * descriptor; convert to a ring index by subtracting the ring base
	 * and dividing by the descriptor size (64 bytes, hence >> 6) */
167 	return (status - ioat->ring_addr) >> 6;
171 * Returns details of copy operations that have been completed
173 * Returns to the caller the user-provided "handles" for the copy operations
174 * which have been completed by the hardware, and not already returned by
175 * a previous call to this API.
178 * The rawdev device id of the ioat instance
180 * The number of entries which can fit in the src_hdls and dst_hdls
181 * arrays, i.e. max number of completed operations to report
183 * Array to hold the source handle parameters of the completed copies
185 * Array to hold the destination handle parameters of the completed copies
187 * -1 on error, with rte_errno set appropriately.
188 * Otherwise number of completed operations i.e. number of entries written
189 * to the src_hdls and dst_hdls array parameters.
/*
 * Gather the user handles of all copies completed since the previous
 * call, writing up to max_copies src/dst handle pairs into the output
 * arrays; returns the count (or -1 with rte_errno on channel error).
 * NOTE(review): the declarations of "i" and "error", the error-return
 * path, the count clamp body and the final return are missing from
 * this excerpt -- confirm against the full header.
 */
192 rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
193 		uintptr_t *src_hdls, uintptr_t *dst_hdls)
195 	struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
196 	unsigned short mask = (ioat->ring_size - 1);
197 	unsigned short read = ioat->next_read;
198 	unsigned short end_read, count;
	/* first ring slot NOT yet completed; +1 because get_last_completed
	 * returns the index of the last finished descriptor */
202 	end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
	/* number of newly-completed slots, modulo the ring size */
203 	count = (end_read - (read & mask)) & mask;
	/* clamp to the caller-provided array capacity (clamp body missing
	 * from this excerpt) */
210 	if (count > max_copies)
	/* fast path: each __m128i in hdls[] holds src (low 64 bits) and dst
	 * (high 64 bits); unpack two slots at a time into the separate
	 * src/dst output arrays */
213 	for (; i < count - 1; i += 2, read += 2) {
214 		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
215 		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);
217 		_mm_storeu_si128((void *)&src_hdls[i],
218 				_mm_unpacklo_epi64(hdls0, hdls1));
219 		_mm_storeu_si128((void *)&dst_hdls[i],
220 				_mm_unpackhi_epi64(hdls0, hdls1));
	/* tail: handle the final slot when count is odd */
222 	for (; i < count; i++, read++) {
223 		uintptr_t *hdls = (void *)&ioat->hdls[read & mask];
224 		src_hdls[i] = hdls[0];
225 		dst_hdls[i] = hdls[1];
	/* advance the read position and the completed-op statistic */
228 	ioat->next_read = read;
229 	ioat->completed += count;
233 #endif /* _RTE_IOAT_RAWDEV_H_ */