/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <inttypes.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "rte_reorder.h"
TAILQ_HEAD(rte_reorder_list, rte_tailq_entry);

static struct rte_tailq_elem rte_reorder_tailq = {
	.name = "RTE_REORDER",
};
EAL_REGISTER_TAILQ(rte_reorder_tailq)

#define RTE_REORDER_PREFIX "RO_"
#define RTE_REORDER_NAMESIZE 32

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDER RTE_LOGTYPE_USER1
/* A generic circular buffer */
struct cir_buffer {
	unsigned int size;   /**< Number of entries that can be stored */
	unsigned int mask;   /**< [buffer_size - 1]: used for wrap-around */
	unsigned int head;   /**< insertion point in buffer */
	unsigned int tail;   /**< extraction point in buffer */
	struct rte_mbuf **entries;
} __rte_cache_aligned;

/* The reorder buffer data structure itself */
struct rte_reorder_buffer {
	char name[RTE_REORDER_NAMESIZE];
	uint32_t min_seqn;  /**< Lowest seq. number that can be in the buffer */
	unsigned int memsize; /**< memory area size of reorder buffer */
	struct cir_buffer ready_buf; /**< temp buffer for dequeued entries */
	struct cir_buffer order_buf; /**< buffer used to reorder entries */
	int is_initialized; /**< set once the first mbuf seeds min_seqn */
} __rte_cache_aligned;
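/*
 * Layout note (illustrative, not part of the API): both entry arrays are
 * carved out of one allocation, immediately after the structure itself, so
 * a caller providing its own memory sizes it as:
 *
 *	unsigned int size = 1024;	// must be a power of two
 *	unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
 *			(2 * size * sizeof(struct rte_mbuf *));
 */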
static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b);
struct rte_reorder_buffer *
rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize,
		const char *name, unsigned int size)
{
	const unsigned int min_bufsize = sizeof(*b) +
					(2 * size * sizeof(struct rte_mbuf *));

	if (b == NULL) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer parameter:"
					" NULL\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (!rte_is_power_of_2(size)) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
				" - Not a power of 2\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (name == NULL) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
					" NULL\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (bufsize < min_bufsize) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer memory size: %u, "
			"minimum required: %u\n", bufsize, min_bufsize);
		rte_errno = EINVAL;
		return NULL;
	}

	memset(b, 0, bufsize);
	strlcpy(b->name, name, sizeof(b->name));
	b->memsize = bufsize;
	b->order_buf.size = b->ready_buf.size = size;
	b->order_buf.mask = b->ready_buf.mask = size - 1;
	b->ready_buf.entries = (void *)&b[1];
	b->order_buf.entries = RTE_PTR_ADD(&b[1],
			size * sizeof(b->ready_buf.entries[0]));

	return b;
}
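/*
 * Usage sketch for caller-managed memory (illustrative, not part of this
 * file; assumes an initialized EAL and a power-of-two "size"):
 *
 *	unsigned int size = 1024;
 *	unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
 *			(2 * size * sizeof(struct rte_mbuf *));
 *	struct rte_reorder_buffer *mem =
 *			rte_zmalloc("example_reorder", bufsize, 0);
 *	struct rte_reorder_buffer *b =
 *			rte_reorder_init(mem, bufsize, "RO_example", size);
 *	if (b == NULL)
 *		rte_free(mem);	// init failed, rte_errno holds the cause
 */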
struct rte_reorder_buffer *
rte_reorder_create(const char *name, unsigned socket_id, unsigned int size)
{
	struct rte_reorder_buffer *b = NULL;
	struct rte_tailq_entry *te;
	struct rte_reorder_list *reorder_list;
	const unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
					(2 * size * sizeof(struct rte_mbuf *));

	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

	/* Check user arguments. */
	if (!rte_is_power_of_2(size)) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
				" - Not a power of 2\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (name == NULL) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
					" NULL\n");
		rte_errno = EINVAL;
		return NULL;
	}

	rte_mcfg_tailq_write_lock();

	/* guarantee there's no existing */
	TAILQ_FOREACH(te, reorder_list, next) {
		b = (struct rte_reorder_buffer *) te->data;
		if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
			break;
	}
	if (te != NULL)
		goto exit;

	/* allocate tailq entry */
	te = rte_zmalloc("REORDER_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, REORDER, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		b = NULL;
		goto exit;
	}

	/* Allocate memory to store the reorder buffer structure. */
	b = rte_zmalloc_socket("REORDER_BUFFER", bufsize, 0, socket_id);
	if (b == NULL) {
		RTE_LOG(ERR, REORDER, "Memzone allocation failed\n");
		rte_errno = ENOMEM;
		rte_free(te);
	} else {
		rte_reorder_init(b, bufsize, name, size);
		te->data = (void *)b;
		TAILQ_INSERT_TAIL(reorder_list, te, next);
	}

exit:
	rte_mcfg_tailq_write_unlock();
	return b;
}
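/*
 * Usage sketch (illustrative): if the name is already registered,
 * rte_reorder_create() returns the existing buffer rather than failing,
 * and other callers can look it up by name:
 *
 *	struct rte_reorder_buffer *b =
 *			rte_reorder_create("RO_example", rte_socket_id(), 1024);
 *	struct rte_reorder_buffer *same =
 *			rte_reorder_find_existing("RO_example");
 *	// here b == same; on allocation failure b is NULL, rte_errno == ENOMEM
 */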
void
rte_reorder_reset(struct rte_reorder_buffer *b)
{
	char name[RTE_REORDER_NAMESIZE];

	rte_reorder_free_mbufs(b);
	strlcpy(name, b->name, sizeof(name));
	/* No error checking as current values should be valid */
	rte_reorder_init(b, b->memsize, name, b->order_buf.size);
}
static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b)
{
	unsigned int i;

	/* Free up the mbufs of order buffer & ready buffer */
	for (i = 0; i < b->order_buf.size; i++) {
		if (b->order_buf.entries[i])
			rte_pktmbuf_free(b->order_buf.entries[i]);
		if (b->ready_buf.entries[i])
			rte_pktmbuf_free(b->ready_buf.entries[i]);
	}
}
void
rte_reorder_free(struct rte_reorder_buffer *b)
{
	struct rte_reorder_list *reorder_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (b == NULL)
		return;

	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, reorder_list, next) {
		if (te->data == (void *) b)
			break;
	}
	if (te == NULL) {
		rte_mcfg_tailq_write_unlock();
		return;
	}

	TAILQ_REMOVE(reorder_list, te, next);

	rte_mcfg_tailq_write_unlock();

	rte_reorder_free_mbufs(b);

	rte_free(b);
	rte_free(te);
}
struct rte_reorder_buffer *
rte_reorder_find_existing(const char *name)
{
	struct rte_reorder_buffer *b = NULL;
	struct rte_tailq_entry *te;
	struct rte_reorder_list *reorder_list;

	if (name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, reorder_list, next) {
		b = (struct rte_reorder_buffer *) te->data;
		if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return b;
}
static unsigned
rte_reorder_fill_overflow(struct rte_reorder_buffer *b, unsigned n)
{
	/*
	 * 1. Move all ready entries that fit to the ready_buf
	 * 2. check if we meet the minimum needed (n).
	 * 3. If not, then skip any gaps and keep moving.
	 * 4. If at any point the ready buffer is full, stop
	 * 5. Return the number of positions the order_buf head has moved
	 */

	struct cir_buffer *order_buf = &b->order_buf,
			*ready_buf = &b->ready_buf;

	unsigned int order_head_adv = 0;

	/*
	 * move at least n packets to ready buffer, assuming ready buffer
	 * has room for those packets.
	 */
	while (order_head_adv < n &&
			((ready_buf->head + 1) & ready_buf->mask) != ready_buf->tail) {

		/* if we are blocked waiting on a packet, skip it */
		if (order_buf->entries[order_buf->head] == NULL) {
			order_buf->head = (order_buf->head + 1) & order_buf->mask;
			order_head_adv++;
		}

		/* Move all ready entries that fit to the ready_buf */
		while (order_buf->entries[order_buf->head] != NULL) {
			ready_buf->entries[ready_buf->head] =
					order_buf->entries[order_buf->head];

			order_buf->entries[order_buf->head] = NULL;
			order_head_adv++;

			order_buf->head = (order_buf->head + 1) & order_buf->mask;

			if (((ready_buf->head + 1) & ready_buf->mask) == ready_buf->tail)
				break;

			ready_buf->head = (ready_buf->head + 1) & ready_buf->mask;
		}
	}

	b->min_seqn += order_head_adv;
	/* Return the number of positions the order_buf head has moved */
	return order_head_adv;
}
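/*
 * Worked trace (illustrative): with size = 8, min_seqn = 100, a gap at
 * seqn 100 and entries present for 101-103, rte_reorder_fill_overflow(b, 2)
 * first skips the NULL slot for 100 (order_head_adv = 1), then moves 101,
 * 102 and 103 to the ready buffer (order_head_adv = 4), leaving
 * min_seqn = 104. The return value 4 >= 2, so the caller's window has
 * advanced far enough to place its new packet.
 */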
int
rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf)
{
	uint32_t offset, position;
	struct cir_buffer *order_buf;

	if (b == NULL || mbuf == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	order_buf = &b->order_buf;
	if (!b->is_initialized) {
		b->min_seqn = mbuf->seqn;
		b->is_initialized = 1;
	}

	/*
	 * calculate the offset from the head pointer we need to go.
	 * The subtraction takes care of the sequence number wrapping.
	 * For example (using 16-bit for brevity):
	 *	min_seqn  = 0xFFFD
	 *	mbuf_seqn = 0x0010
	 *	offset    = 0x0010 - 0xFFFD = 0x13
	 */
	offset = mbuf->seqn - b->min_seqn;

	/*
	 * action to take depends on offset.
	 * offset < buffer->size: the mbuf fits within the current window of
	 *    sequence numbers we can reorder. EXPECTED CASE.
	 * offset > buffer->size: the mbuf is outside the current window. There
	 *    are a number of cases to consider:
	 *    1. The packet sequence is just outside the window, then we need
	 *       to see about shifting the head pointer and taking any ready
	 *       to return packets out of the ring. If there was a delayed
	 *       or dropped packet preventing drains from shifting the window
	 *       this case will skip over the dropped packet instead, and any
	 *       packets dequeued here will be returned on the next drain call.
	 *    2. The packet sequence number is vastly outside our window, taken
	 *       here as having offset greater than twice the buffer size. In
	 *       this case, the packet is probably an old or late packet that
	 *       was previously skipped, so just enqueue the packet for
	 *       immediate return on the next drain call, or else return error.
	 */
	if (offset < b->order_buf.size) {
		position = (order_buf->head + offset) & order_buf->mask;
		order_buf->entries[position] = mbuf;
	} else if (offset < 2 * b->order_buf.size) {
		if (rte_reorder_fill_overflow(b, offset + 1 - order_buf->size)
				< (offset + 1 - order_buf->size)) {
			/* Put in handling for enqueue straight to output */
			rte_errno = ENOSPC;
			return -1;
		}
		offset = mbuf->seqn - b->min_seqn;
		position = (order_buf->head + offset) & order_buf->mask;
		order_buf->entries[position] = mbuf;
	} else {
		/* Put in handling for enqueue straight to output */
		rte_errno = ERANGE;
		return -1;
	}

	return 0;
}
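/*
 * Error-path usage sketch (illustrative): on failure the caller still owns
 * the mbuf and must either forward it out of order or drop it, since the
 * library does not hold packets it reports as unplaceable.
 *
 *	if (rte_reorder_insert(b, m) == -1) {
 *		if (rte_errno == ERANGE || rte_errno == ENOSPC)
 *			rte_pktmbuf_free(m);	// or send straight to output
 *	}
 */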
unsigned int
rte_reorder_drain(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs,
		unsigned max_mbufs)
{
	unsigned int drain_cnt = 0;

	struct cir_buffer *order_buf = &b->order_buf,
			*ready_buf = &b->ready_buf;

	/* Try to fetch requested number of mbufs from ready buffer */
	while ((drain_cnt < max_mbufs) && (ready_buf->tail != ready_buf->head)) {
		mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail];
		ready_buf->tail = (ready_buf->tail + 1) & ready_buf->mask;
	}

	/*
	 * If requested number of buffers not fetched from ready buffer, fetch
	 * remaining buffers from order buffer
	 */
	while ((drain_cnt < max_mbufs) &&
			(order_buf->entries[order_buf->head] != NULL)) {
		mbufs[drain_cnt++] = order_buf->entries[order_buf->head];
		order_buf->entries[order_buf->head] = NULL;
		b->min_seqn++;
		order_buf->head = (order_buf->head + 1) & order_buf->mask;
	}

	return drain_cnt;
}
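/*
 * End-to-end usage sketch (illustrative, not part of this file): a worker
 * tags received packets with a sequence number, inserts them, and drains
 * in-order bursts for transmit. BURST, "seqn" and the port/queue ids are
 * assumptions for the example.
 *
 *	struct rte_mbuf *pkts[BURST], *out[BURST];
 *	uint16_t i, nb_rx = rte_eth_rx_burst(port, 0, pkts, BURST);
 *	for (i = 0; i < nb_rx; i++) {
 *		pkts[i]->seqn = seqn++;
 *		if (rte_reorder_insert(b, pkts[i]) == -1)
 *			rte_pktmbuf_free(pkts[i]);
 *	}
 *	unsigned int nb_out = rte_reorder_drain(b, out, BURST);
 *	rte_eth_tx_burst(port, 0, out, nb_out);
 */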