lib/librte_reorder/rte_reorder.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <inttypes.h>
#include <string.h>

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>

#include "rte_reorder.h"

TAILQ_HEAD(rte_reorder_list, rte_tailq_entry);

static struct rte_tailq_elem rte_reorder_tailq = {
        .name = "RTE_REORDER",
};
EAL_REGISTER_TAILQ(rte_reorder_tailq)

#define NO_FLAGS 0
#define RTE_REORDER_PREFIX "RO_"
#define RTE_REORDER_NAMESIZE 32

/* Log type used with RTE_LOG */
#define RTE_LOGTYPE_REORDER     RTE_LOGTYPE_USER1

/* A generic circular buffer */
struct cir_buffer {
        unsigned int size;   /**< Number of entries that can be stored */
        unsigned int mask;   /**< [size - 1]: used for wrap-around */
        unsigned int head;   /**< insertion point in buffer */
        unsigned int tail;   /**< extraction point in buffer */
        struct rte_mbuf **entries;
} __rte_cache_aligned;
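
/*
 * A note on the wrap-around arithmetic: because size is a power of two,
 * "index & mask" equals "index % size", so head and tail advance without
 * a division. A minimal sketch, assuming size = 8 (mask = 7):
 *
 *     unsigned int head = 7;
 *     head = (head + 1) & 7;    becomes 0 instead of 8
 */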

/* The reorder buffer data structure itself */
struct rte_reorder_buffer {
        char name[RTE_REORDER_NAMESIZE];
        uint32_t min_seqn;  /**< Lowest seq. number that can be in the buffer */
        unsigned int memsize; /**< memory area size of reorder buffer */
        struct cir_buffer ready_buf; /**< temp buffer for dequeued entries */
        struct cir_buffer order_buf; /**< buffer used to reorder entries */
        int is_initialized;
} __rte_cache_aligned;
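
/*
 * Conceptually, min_seqn and order_buf form a sliding window over the
 * sequence-number space: slot (order_buf.head + i) & mask holds the mbuf
 * with sequence number min_seqn + i, so the window covers
 * [min_seqn, min_seqn + order_buf.size). Entries popped off the front of
 * the window are staged in ready_buf until the next drain call.
 */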

static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b);

struct rte_reorder_buffer *
rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize,
                const char *name, unsigned int size)
{
        const unsigned int min_bufsize = sizeof(*b) +
                                        (2 * size * sizeof(struct rte_mbuf *));

        if (b == NULL) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer parameter:"
                                        " NULL\n");
                rte_errno = EINVAL;
                return NULL;
        }
        if (!rte_is_power_of_2(size)) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
                                " - Not a power of 2\n");
                rte_errno = EINVAL;
                return NULL;
        }
        if (name == NULL) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
                                        " NULL\n");
                rte_errno = EINVAL;
                return NULL;
        }
        if (bufsize < min_bufsize) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer memory size: %u, "
                        "minimum required: %u\n", bufsize, min_bufsize);
                rte_errno = EINVAL;
                return NULL;
        }

        memset(b, 0, bufsize);
        snprintf(b->name, sizeof(b->name), "%s", name);
        b->memsize = bufsize;
        b->order_buf.size = b->ready_buf.size = size;
        b->order_buf.mask = b->ready_buf.mask = size - 1;
        b->ready_buf.entries = (void *)&b[1];
        b->order_buf.entries = RTE_PTR_ADD(&b[1],
                        size * sizeof(b->ready_buf.entries[0]));

        return b;
}
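
/*
 * A sketch of the single contiguous region that rte_reorder_init() above
 * expects, which is what min_bufsize checks for:
 *
 *     +---------------------------+--------------------+--------------------+
 *     | struct rte_reorder_buffer | ready_buf.entries  | order_buf.entries  |
 *     | (header)                  | size pointers      | size pointers      |
 *     +---------------------------+--------------------+--------------------+
 *     ^ b                         ^ &b[1]
 */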

struct rte_reorder_buffer*
rte_reorder_create(const char *name, unsigned socket_id, unsigned int size)
{
        struct rte_reorder_buffer *b = NULL;
        struct rte_tailq_entry *te;
        struct rte_reorder_list *reorder_list;
        const unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
                                        (2 * size * sizeof(struct rte_mbuf *));

        reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

        /* Check user arguments. */
        if (!rte_is_power_of_2(size)) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
                                " - Not a power of 2\n");
                rte_errno = EINVAL;
                return NULL;
        }
        if (name == NULL) {
                RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
                                        " NULL\n");
                rte_errno = EINVAL;
                return NULL;
        }

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* guarantee there's no existing entry with the same name */
        TAILQ_FOREACH(te, reorder_list, next) {
                b = (struct rte_reorder_buffer *) te->data;
                if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
                        break;
        }
        if (te != NULL)
                goto exit;

        /* allocate tailq entry */
        te = rte_zmalloc("REORDER_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, REORDER, "Failed to allocate tailq entry\n");
                rte_errno = ENOMEM;
                b = NULL;
                goto exit;
        }

        /* Allocate memory to store the reorder buffer structure. */
        b = rte_zmalloc_socket("REORDER_BUFFER", bufsize, 0, socket_id);
        if (b == NULL) {
                RTE_LOG(ERR, REORDER, "Reorder buffer allocation failed\n");
                rte_errno = ENOMEM;
                rte_free(te);
        } else {
                rte_reorder_init(b, bufsize, name, size);
                te->data = (void *)b;
                TAILQ_INSERT_TAIL(reorder_list, te, next);
        }

exit:
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
        return b;
}
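
/*
 * A minimal usage sketch (illustrative; "ro" and the window size of 1024
 * are placeholders, not part of this file):
 *
 *     struct rte_reorder_buffer *ro;
 *
 *     ro = rte_reorder_create("PKT_RO", rte_socket_id(), 1024);
 *     if (ro == NULL)
 *         rte_exit(EXIT_FAILURE, "reorder create failed: %d\n", rte_errno);
 */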

void
rte_reorder_reset(struct rte_reorder_buffer *b)
{
        char name[RTE_REORDER_NAMESIZE];

        rte_reorder_free_mbufs(b);
        snprintf(name, sizeof(name), "%s", b->name);
        /* No error checking as current values should be valid */
        rte_reorder_init(b, b->memsize, name, b->order_buf.size);
}

static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b)
{
        unsigned i;

        /* Free up the mbufs of order buffer & ready buffer */
        for (i = 0; i < b->order_buf.size; i++) {
                if (b->order_buf.entries[i])
                        rte_pktmbuf_free(b->order_buf.entries[i]);
                if (b->ready_buf.entries[i])
                        rte_pktmbuf_free(b->ready_buf.entries[i]);
        }
}

void
rte_reorder_free(struct rte_reorder_buffer *b)
{
        struct rte_reorder_list *reorder_list;
        struct rte_tailq_entry *te;

        /* Check user arguments. */
        if (b == NULL)
                return;

        reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* find our tailq entry */
        TAILQ_FOREACH(te, reorder_list, next) {
                if (te->data == (void *) b)
                        break;
        }
        if (te == NULL) {
                rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
                return;
        }

        TAILQ_REMOVE(reorder_list, te, next);

        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        rte_reorder_free_mbufs(b);

        rte_free(b);
        rte_free(te);
}

struct rte_reorder_buffer *
rte_reorder_find_existing(const char *name)
{
        struct rte_reorder_buffer *b = NULL;
        struct rte_tailq_entry *te;
        struct rte_reorder_list *reorder_list;

        reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, reorder_list, next) {
                b = (struct rte_reorder_buffer *) te->data;
                if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
                        break;
        }
        rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        return b;
}

static unsigned
rte_reorder_fill_overflow(struct rte_reorder_buffer *b, unsigned n)
{
        /*
         * 1. Move all ready entries that fit to the ready_buf.
         * 2. Check whether we have met the minimum needed (n).
         * 3. If not, skip any gaps and keep moving.
         * 4. If at any point the ready buffer is full, stop.
         * 5. Return the number of positions the order_buf head has moved.
         */

        struct cir_buffer *order_buf = &b->order_buf,
                        *ready_buf = &b->ready_buf;

        unsigned int order_head_adv = 0;

        /*
         * move at least n packets to the ready buffer, assuming the ready
         * buffer has room for them.
         */
        while (order_head_adv < n &&
                        ((ready_buf->head + 1) & ready_buf->mask) != ready_buf->tail) {

                /* if we are blocked waiting on a packet, skip it */
                if (order_buf->entries[order_buf->head] == NULL) {
                        order_buf->head = (order_buf->head + 1) & order_buf->mask;
                        order_head_adv++;
                }

                /* Move all ready entries that fit to the ready_buf */
                while (order_buf->entries[order_buf->head] != NULL) {
                        ready_buf->entries[ready_buf->head] =
                                        order_buf->entries[order_buf->head];

                        order_buf->entries[order_buf->head] = NULL;
                        order_head_adv++;

                        order_buf->head = (order_buf->head + 1) & order_buf->mask;

                        if (((ready_buf->head + 1) & ready_buf->mask) == ready_buf->tail)
                                break;

                        ready_buf->head = (ready_buf->head + 1) & ready_buf->mask;
                }
        }

        b->min_seqn += order_head_adv;
        /* Return the number of positions the order_buf head has moved */
        return order_head_adv;
}
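
/*
 * A worked example of the above (illustrative): suppose order_buf holds
 * [A, NULL, C, D, NULL, ...] with head = 0, and n = 2. The first pass
 * moves A to the ready buffer; the gap at slot 1 is then skipped, and C
 * and D are moved as well, so the function returns 4 and min_seqn
 * advances by 4. The skipped slot's packet is treated as lost: if it
 * arrives later, its offset in rte_reorder_insert() underflows to a huge
 * value and the insert fails with ERANGE.
 */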

int
rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf)
{
        uint32_t offset, position;
        struct cir_buffer *order_buf = &b->order_buf;

        if (!b->is_initialized) {
                b->min_seqn = mbuf->seqn;
                b->is_initialized = 1;
        }

        /*
         * calculate the offset from the head pointer at which this mbuf
         * should be inserted. The unsigned subtraction takes care of
         * sequence number wrapping. For example (using 16-bit values for
         * brevity):
         *      min_seqn  = 0xFFFD
         *      mbuf_seqn = 0x0010
         *      offset    = 0x0010 - 0xFFFD = 0x13
         */
        offset = mbuf->seqn - b->min_seqn;

        /*
         * The action to take depends on the offset.
         * offset < buffer->size: the mbuf fits within the current window of
         *    sequence numbers we can reorder. EXPECTED CASE.
         * offset >= buffer->size: the mbuf is outside the current window.
         *    There are two cases to consider:
         *    1. The sequence number is just outside the window (less than
         *       twice the buffer size away), so try to shift the window by
         *       advancing the head pointer and moving any ready-to-return
         *       packets out to the ready ring. If a delayed or dropped
         *       packet is preventing drains from shifting the window, this
         *       case skips over the missing packet instead, and any packets
         *       dequeued here will be returned on the next drain call.
         *    2. The sequence number is far outside the window, taken here
         *       as an offset of at least twice the buffer size. The packet
         *       is probably an old or late packet that was previously
         *       skipped; ideally it would be enqueued for immediate return
         *       on the next drain call, but the current code simply
         *       returns an error.
         */
        if (offset < b->order_buf.size) {
                position = (order_buf->head + offset) & order_buf->mask;
                order_buf->entries[position] = mbuf;
        } else if (offset < 2 * b->order_buf.size) {
                if (rte_reorder_fill_overflow(b, offset + 1 - order_buf->size)
                                < (offset + 1 - order_buf->size)) {
                        /* Put in handling for enqueue straight to output */
                        rte_errno = ENOSPC;
                        return -1;
                }
                offset = mbuf->seqn - b->min_seqn;
                position = (order_buf->head + offset) & order_buf->mask;
                order_buf->entries[position] = mbuf;
        } else {
                /* Put in handling for enqueue straight to output */
                rte_errno = ERANGE;
                return -1;
        }
        return 0;
}
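
/*
 * A minimal sketch of the producer side (illustrative; "ro" and "seqn"
 * are assumed to be maintained by the caller): stamp each mbuf with its
 * sequence number, then hand it to the reorder buffer.
 *
 *     mbuf->seqn = seqn++;
 *     if (rte_reorder_insert(ro, mbuf) < 0)
 *         rte_pktmbuf_free(mbuf);
 *
 * On failure rte_errno is ENOSPC (ready buffer full) or ERANGE (stale
 * sequence number), as set above.
 */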

unsigned int
rte_reorder_drain(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs,
                unsigned max_mbufs)
{
        unsigned int drain_cnt = 0;

        struct cir_buffer *order_buf = &b->order_buf,
                        *ready_buf = &b->ready_buf;

        /* Try to fetch the requested number of mbufs from the ready buffer */
        while ((drain_cnt < max_mbufs) && (ready_buf->tail != ready_buf->head)) {
                mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail];
                ready_buf->tail = (ready_buf->tail + 1) & ready_buf->mask;
        }

        /*
         * If the requested number of buffers was not fetched from the ready
         * buffer, fetch the remaining buffers from the order buffer.
         */
        while ((drain_cnt < max_mbufs) &&
                        (order_buf->entries[order_buf->head] != NULL)) {
                mbufs[drain_cnt++] = order_buf->entries[order_buf->head];
                order_buf->entries[order_buf->head] = NULL;
                b->min_seqn++;
                order_buf->head = (order_buf->head + 1) & order_buf->mask;
        }

        return drain_cnt;
}
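
/*
 * A minimal sketch of the consumer side (illustrative; BURST, port and
 * queue are placeholders): drain in bursts and transmit the mbufs, now in
 * sequence order.
 *
 *     struct rte_mbuf *out[BURST];
 *     unsigned int nb;
 *
 *     nb = rte_reorder_drain(ro, out, BURST);
 *     if (nb > 0)
 *         rte_eth_tx_burst(port, queue, out, nb);
 */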