/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <inttypes.h>
#include <string.h>

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>

#include "rte_reorder.h"

TAILQ_HEAD(rte_reorder_list, rte_tailq_entry);

static struct rte_tailq_elem rte_reorder_tailq = {
        .name = "RTE_REORDER",
};
EAL_REGISTER_TAILQ(rte_reorder_tailq)

#define RTE_REORDER_PREFIX "RO_"
#define RTE_REORDER_NAMESIZE 32

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDER RTE_LOGTYPE_USER1

/* A generic circular buffer */
struct cir_buffer {
        unsigned int size;   /**< Number of entries that can be stored */
        unsigned int mask;   /**< [buffer_size - 1]: used for wrap-around */
        unsigned int head;   /**< insertion point in buffer */
        unsigned int tail;   /**< extraction point in buffer */
        struct rte_mbuf **entries;
} __rte_cache_aligned;

/* The reorder buffer data structure itself */
struct rte_reorder_buffer {
        char name[RTE_REORDER_NAMESIZE];
        uint32_t min_seqn;  /**< Lowest seq. number that can be in the buffer */
        unsigned int memsize; /**< memory area size of reorder buffer */
        struct cir_buffer ready_buf; /**< temp buffer for dequeued entries */
        struct cir_buffer order_buf; /**< buffer used to reorder entries */
        int is_initialized; /**< set on first insert, which seeds min_seqn */
} __rte_cache_aligned;

static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b);

struct rte_reorder_buffer *
rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize,
                const char *name, unsigned int size)
{
        const unsigned int min_bufsize = sizeof(*b) +
                                        (2 * size * sizeof(struct rte_mbuf *));

        if (b == NULL) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer parameter:"
                                        " NULL\n");
                rte_errno = EINVAL;
                return NULL;
        }
        if (!rte_is_power_of_2(size)) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
                                " - Not a power of 2\n");
                rte_errno = EINVAL;
                return NULL;
        }
        if (name == NULL) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
                                        " NULL\n");
                rte_errno = EINVAL;
                return NULL;
        }
        if (bufsize < min_bufsize) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer memory size: %u, "
                        "minimum required: %u\n", bufsize, min_bufsize);
                rte_errno = EINVAL;
                return NULL;
        }

        memset(b, 0, bufsize);
        snprintf(b->name, sizeof(b->name), "%s", name);
        b->memsize = bufsize;
        b->order_buf.size = b->ready_buf.size = size;
        b->order_buf.mask = b->ready_buf.mask = size - 1;
        b->ready_buf.entries = (void *)&b[1];
        b->order_buf.entries = RTE_PTR_ADD(&b[1],
                        size * sizeof(b->ready_buf.entries[0]));

        return b;
}
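
/*
 * Illustrative sketch (not part of the library): a caller managing its own
 * memory can size the area the same way rte_reorder_create() does below --
 * the structure itself followed by two arrays of "size" mbuf pointers
 * (the ready_buf entries first, then the order_buf entries) -- and hand it
 * to rte_reorder_init(). The names "my_size", "my_bufsize" and "mem" are
 * hypothetical:
 *
 *      unsigned int my_size = 512;     // must be a power of two
 *      unsigned int my_bufsize = sizeof(struct rte_reorder_buffer) +
 *                      2 * my_size * sizeof(struct rte_mbuf *);
 *      void *mem = rte_zmalloc("my_reorder", my_bufsize, 0);
 *      struct rte_reorder_buffer *rb = rte_reorder_init(mem, my_bufsize,
 *                      "RO_EXAMPLE", my_size);
 *      if (rb == NULL)
 *              rte_free(mem);          // rte_errno holds the failure cause
 */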

struct rte_reorder_buffer *
rte_reorder_create(const char *name, unsigned int socket_id, unsigned int size)
{
        struct rte_reorder_buffer *b = NULL;
        struct rte_tailq_entry *te;
        struct rte_reorder_list *reorder_list;
        const unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
                                        (2 * size * sizeof(struct rte_mbuf *));

        reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

        /* Check user arguments. */
        if (!rte_is_power_of_2(size)) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
                                " - Not a power of 2\n");
                rte_errno = EINVAL;
                return NULL;
        }
        if (name == NULL) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
                                        " NULL\n");
                rte_errno = EINVAL;
                return NULL;
        }

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* Guarantee there's no existing buffer of the same name. */
        TAILQ_FOREACH(te, reorder_list, next) {
                b = (struct rte_reorder_buffer *) te->data;
                if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
                        break;
        }
        if (te != NULL)
                goto exit;

        /* allocate tailq entry */
        te = rte_zmalloc("REORDER_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, REORDER, "Failed to allocate tailq entry\n");
                rte_errno = ENOMEM;
                b = NULL;
                goto exit;
        }

        /* Allocate memory to store the reorder buffer structure. */
        b = rte_zmalloc_socket("REORDER_BUFFER", bufsize, 0, socket_id);
        if (b == NULL) {
                RTE_LOG(ERR, REORDER, "Reorder buffer memory allocation failed\n");
                rte_errno = ENOMEM;
                rte_free(te);
        } else {
                rte_reorder_init(b, bufsize, name, size);
                te->data = (void *)b;
                TAILQ_INSERT_TAIL(reorder_list, te, next);
        }

exit:
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        return b;
}

void
rte_reorder_reset(struct rte_reorder_buffer *b)
{
        char name[RTE_REORDER_NAMESIZE];

        rte_reorder_free_mbufs(b);
        snprintf(name, sizeof(name), "%s", b->name);
        /* No error checking as current values should be valid */
        rte_reorder_init(b, b->memsize, name, b->order_buf.size);
}

static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b)
{
        unsigned int i;

        /* Free up the mbufs of order buffer & ready buffer */
        for (i = 0; i < b->order_buf.size; i++) {
                if (b->order_buf.entries[i])
                        rte_pktmbuf_free(b->order_buf.entries[i]);
                if (b->ready_buf.entries[i])
                        rte_pktmbuf_free(b->ready_buf.entries[i]);
        }
}

void
rte_reorder_free(struct rte_reorder_buffer *b)
{
        struct rte_reorder_list *reorder_list;
        struct rte_tailq_entry *te;

        /* Check user arguments. */
        if (b == NULL)
                return;

        reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* find our tailq entry */
        TAILQ_FOREACH(te, reorder_list, next) {
                if (te->data == (void *) b)
                        break;
        }
        if (te == NULL) {
                rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
                return;
        }

        TAILQ_REMOVE(reorder_list, te, next);

        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        rte_reorder_free_mbufs(b);

        rte_free(b);
        rte_free(te);
}

struct rte_reorder_buffer *
rte_reorder_find_existing(const char *name)
{
        struct rte_reorder_buffer *b = NULL;
        struct rte_tailq_entry *te;
        struct rte_reorder_list *reorder_list;

        reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, reorder_list, next) {
                b = (struct rte_reorder_buffer *) te->data;
                if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
                        break;
        }
        rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        return b;
}
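
/*
 * Lookup sketch (illustrative; the name "RO_EXAMPLE" is hypothetical): any
 * part of the application can retrieve a buffer created elsewhere by name:
 *
 *      struct rte_reorder_buffer *rb = rte_reorder_find_existing("RO_EXAMPLE");
 *      if (rb == NULL) {
 *              // rte_errno is ENOENT when no buffer with that name exists
 *      }
 */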

static unsigned int
rte_reorder_fill_overflow(struct rte_reorder_buffer *b, unsigned int n)
{
        /*
         * 1. Move all ready entries that fit to the ready_buf.
         * 2. Check if we meet the minimum needed (n).
         * 3. If not, then skip any gaps and keep moving.
         * 4. If at any point the ready buffer is full, stop.
         * 5. Return the number of positions the order_buf head has moved.
         */

        struct cir_buffer *order_buf = &b->order_buf,
                        *ready_buf = &b->ready_buf;

        unsigned int order_head_adv = 0;

        /*
         * Move at least n packets to the ready buffer, assuming the ready
         * buffer has room for those packets.
         */
        while (order_head_adv < n &&
                        ((ready_buf->head + 1) & ready_buf->mask) != ready_buf->tail) {

                /* if we are blocked waiting on a packet, skip it */
                if (order_buf->entries[order_buf->head] == NULL) {
                        order_buf->head = (order_buf->head + 1) & order_buf->mask;
                        order_head_adv++;
                }

                /* Move all ready entries that fit to the ready_buf */
                while (order_buf->entries[order_buf->head] != NULL) {
                        ready_buf->entries[ready_buf->head] =
                                        order_buf->entries[order_buf->head];

                        order_buf->entries[order_buf->head] = NULL;
                        order_head_adv++;

                        order_buf->head = (order_buf->head + 1) & order_buf->mask;

                        if (((ready_buf->head + 1) & ready_buf->mask) == ready_buf->tail)
                                break;

                        ready_buf->head = (ready_buf->head + 1) & ready_buf->mask;
                }
        }

        b->min_seqn += order_head_adv;
        /* Return the number of positions the order_buf head has moved */
        return order_head_adv;
}
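
/*
 * Worked example (illustrative): with size = 8, min_seqn = 100, and the
 * order buffer holding mbufs for sequence numbers 100, 101 and 103 (102
 * missing), a call with n = 3 moves 100 and 101 to the ready buffer, skips
 * the gap at 102, moves 103, and returns 4. min_seqn becomes 104, so a late
 * packet 102 arriving afterwards computes a wrapped, very large offset in
 * rte_reorder_insert() below and is rejected with ERANGE.
 */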

int
rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf)
{
        uint32_t offset, position;
        struct cir_buffer *order_buf = &b->order_buf;

        /* Seed the sequence window from the first mbuf seen. */
        if (!b->is_initialized) {
                b->min_seqn = mbuf->seqn;
                b->is_initialized = 1;
        }

        /*
         * Calculate the offset from the head pointer we need to go.
         * The subtraction takes care of the sequence number wrapping.
         * For example (using 16-bit for brevity):
         *      min_seqn  = 0xFFFD
         *      mbuf_seqn = 0x0010
         *      offset    = 0x0010 - 0xFFFD = 0x13
         */
        offset = mbuf->seqn - b->min_seqn;

        /*
         * The action to take depends on the offset.
         * offset < buffer->size: the mbuf fits within the current window of
         *    sequence numbers we can reorder. EXPECTED CASE.
         * offset >= buffer->size: the mbuf is outside the current window. There
         *    are a number of cases to consider:
         *    1. The packet sequence is just outside the window, then we need
         *       to see about shifting the head pointer and taking any ready
         *       to return packets out of the ring. If there was a delayed
         *       or dropped packet preventing drains from shifting the window,
         *       this case will skip over the dropped packet instead, and any
         *       packets dequeued here will be returned on the next drain call.
         *    2. The packet sequence number is vastly outside our window, taken
         *       here as having an offset of at least twice the buffer size. In
         *       this case, the packet is probably an old or late packet that
         *       was previously skipped, so just enqueue the packet for
         *       immediate return on the next drain call, or else return error.
         */
        if (offset < b->order_buf.size) {
                position = (order_buf->head + offset) & order_buf->mask;
                order_buf->entries[position] = mbuf;
        } else if (offset < 2 * b->order_buf.size) {
                if (rte_reorder_fill_overflow(b, offset + 1 - order_buf->size)
                                < (offset + 1 - order_buf->size)) {
                        /* Put in handling for enqueue straight to output */
                        rte_errno = ENOSPC;
                        return -1;
                }
                offset = mbuf->seqn - b->min_seqn;
                position = (order_buf->head + offset) & order_buf->mask;
                order_buf->entries[position] = mbuf;
        } else {
                /* Put in handling for enqueue straight to output */
                rte_errno = ERANGE;
                return -1;
        }

        return 0;
}

unsigned int
rte_reorder_drain(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs,
                unsigned int max_mbufs)
{
        unsigned int drain_cnt = 0;

        struct cir_buffer *order_buf = &b->order_buf,
                        *ready_buf = &b->ready_buf;

        /* Try to fetch requested number of mbufs from ready buffer */
        while ((drain_cnt < max_mbufs) && (ready_buf->tail != ready_buf->head)) {
                mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail];
                ready_buf->tail = (ready_buf->tail + 1) & ready_buf->mask;
        }

        /*
         * If the requested number of buffers was not fetched from the ready
         * buffer, fetch the remaining buffers from the order buffer.
         */
        while ((drain_cnt < max_mbufs) &&
                        (order_buf->entries[order_buf->head] != NULL)) {
                mbufs[drain_cnt++] = order_buf->entries[order_buf->head];
                order_buf->entries[order_buf->head] = NULL;
                b->min_seqn++;
                order_buf->head = (order_buf->head + 1) & order_buf->mask;
        }

        return drain_cnt;
}
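
/*
 * End-to-end usage sketch (illustrative only; the buffer name and burst
 * size are hypothetical). Each mbuf is stamped with a sequence number
 * before being handed to out-of-order workers; a single drainer later
 * pulls the packets back out in sequence order:
 *
 *      struct rte_reorder_buffer *rb =
 *                      rte_reorder_create("RO_EXAMPLE", rte_socket_id(), 1024);
 *      uint32_t seqn = 0;
 *
 *      // producer: stamp and insert (possibly after parallel processing)
 *      mbuf->seqn = seqn++;
 *      if (rte_reorder_insert(rb, mbuf) < 0)
 *              rte_pktmbuf_free(mbuf); // ENOSPC or ERANGE: outside window
 *
 *      // drainer: collect whatever is in sequence so far
 *      struct rte_mbuf *out[32];
 *      unsigned int n = rte_reorder_drain(rb, out, 32);
 *      // out[0..n-1] now hold mbufs in ascending seqn order
 */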