/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include "rte_distributor.h"

#define NO_FLAGS 0
#define RTE_DISTRIB_PREFIX "DT_"
/* we will use the bottom four bits of pointer for flags, shifting out
 * the top four bits to make room (since a 64-bit pointer actually only uses
 * 48 bits). An arithmetic-right-shift will then appropriately restore the
 * original pointer value with proper sign extension into the top bits. */
#define RTE_DISTRIB_FLAG_BITS 4
#define RTE_DISTRIB_FLAGS_MASK (0x0F)
#define RTE_DISTRIB_NO_BUF 0       /**< empty flags: no buffer requested */
#define RTE_DISTRIB_GET_BUF (1)    /**< worker requests a buffer, returns old */
#define RTE_DISTRIB_RETURN_BUF (2) /**< worker returns a buffer, no request */
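
/* For illustration: a packet pointer and a flag travel together in one
 * 64-bit word. With an mbuf at, say, 0x00007f0012345680, a GET request is
 * encoded as (0x00007f0012345680 << 4) | RTE_DISTRIB_GET_BUF; an arithmetic
 * right shift by RTE_DISTRIB_FLAG_BITS then recovers the original pointer. */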
#define RTE_DISTRIB_BACKLOG_SIZE 8
#define RTE_DISTRIB_BACKLOG_MASK (RTE_DISTRIB_BACKLOG_SIZE - 1)

#define RTE_DISTRIB_MAX_RETURNS 128
#define RTE_DISTRIB_RETURNS_MASK (RTE_DISTRIB_MAX_RETURNS - 1)
/**
 * Maximum number of workers allowed.
 * Be careful when increasing this limit: it is bounded by how in-flight
 * tags are tracked. See @in_flight_bitmask and @rte_distributor_process
 */
#define RTE_DISTRIB_MAX_WORKERS 64
/**
 * Buffer structure used to pass the pointer data between cores. This is cache
 * line aligned, but to improve performance and prevent adjacent cache-line
 * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
 * the next cache line to worker 0, we pad this out to three cache lines.
 * Only 64 bits of the memory are actually used though.
 */
union rte_distributor_buffer {
	volatile int64_t bufptr64;
	char pad[RTE_CACHE_LINE_SIZE*3];
} __rte_cache_aligned;
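
/* Sizing note (assuming the common 64-byte cache line): each buffer then
 * occupies 192 bytes, leaving two untouched lines between the 64-bit words
 * used by neighbouring workers, so an adjacent-line hardware prefetch on one
 * worker's line never pulls in another worker's buffer. */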
struct rte_distributor_backlog {
	unsigned start;
	unsigned count;
	int64_t pkts[RTE_DISTRIB_BACKLOG_SIZE];
};

struct rte_distributor_returned_pkts {
	unsigned start;
	unsigned count;
	struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
};
struct rte_distributor {
	TAILQ_ENTRY(rte_distributor) next;    /**< Next in list. */

	char name[RTE_DISTRIBUTOR_NAMESIZE];  /**< Name of the ring. */
	unsigned num_workers;                 /**< Number of workers polling */

	uint32_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS];
		/**< Tracks the tag being processed per core */
	uint64_t in_flight_bitmask;
		/**< on/off bits for in-flight tags.
		 * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64 then
		 * the bitmask has to expand.
		 */

	struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];

	union rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];

	struct rte_distributor_returned_pkts returns;
};
TAILQ_HEAD(rte_distributor_list, rte_distributor);

/**** APIs called by workers ****/
void
rte_distributor_request_pkt(struct rte_distributor *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer *buf = &d->bufs[worker_id];
	int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_GET_BUF;
	while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
		rte_pause();
	buf->bufptr64 = req;
}
struct rte_mbuf *
rte_distributor_poll_pkt(struct rte_distributor *d,
		unsigned worker_id)
{
	union rte_distributor_buffer *buf = &d->bufs[worker_id];
	if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
		return NULL;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
	return (struct rte_mbuf *)((uintptr_t)ret);
}
struct rte_mbuf *
rte_distributor_get_pkt(struct rte_distributor *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	struct rte_mbuf *ret;
	rte_distributor_request_pkt(d, worker_id, oldpkt);
	while ((ret = rte_distributor_poll_pkt(d, worker_id)) == NULL)
		rte_pause();
	return ret;
}
int
rte_distributor_return_pkt(struct rte_distributor *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer *buf = &d->bufs[worker_id];
	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_RETURN_BUF;
	buf->bufptr64 = req;
	return 0;
}
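
/*
 * A minimal worker loop built from the three calls above (illustrative
 * sketch only; "running" and do_work() are placeholders for application
 * logic):
 *
 *	struct rte_mbuf *pkt = rte_distributor_get_pkt(d, worker_id, NULL);
 *	while (running) {
 *		do_work(pkt);
 *		pkt = rte_distributor_get_pkt(d, worker_id, pkt);
 *	}
 *	rte_distributor_return_pkt(d, worker_id, pkt);
 */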
/**** APIs called on distributor core ***/

/* as name suggests, adds a packet to the backlog for a particular worker */
static int
add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
{
	if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
		return -1;

	bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
			= item;
	return 0;
}
182 /* takes the next packet for a worker off the backlog */
184 backlog_pop(struct rte_distributor_backlog *bl)
187 return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
/* stores a packet returned from a worker inside the returns array */
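/* The update is branch-free: !!(oldbuf) is 0 for a NULL return and 1
 * otherwise, so a NULL is written into the ring slot but never counted;
 * and once the ring is full (count == RTE_DISTRIB_RETURNS_MASK), start
 * advances instead of count, overwriting the oldest stored return. */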
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
		unsigned *ret_start, unsigned *ret_count)
{
	/* store returns in a circular buffer - code is branch-free */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
}
static inline void
handle_worker_shutdown(struct rte_distributor *d, unsigned wkr)
{
	d->in_flight_tags[wkr] = 0;
	d->in_flight_bitmask &= ~(1UL << wkr);
	d->bufs[wkr].bufptr64 = 0;
	if (unlikely(d->backlog[wkr].count != 0)) {
		/* On return of a packet, we need to move the
		 * queued packets for this core elsewhere.
		 * Easiest solution is to set things up for
		 * a recursive call. That will cause those
		 * packets to be queued up for the next free
		 * core, i.e. it will return as soon as a
		 * core becomes free to accept the first
		 * packet, as subsequent ones will be added to
		 * the backlog for that core.
		 */
		struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
		unsigned i;
		struct rte_distributor_backlog *bl = &d->backlog[wkr];

		for (i = 0; i < bl->count; i++) {
			unsigned idx = (bl->start + i) &
					RTE_DISTRIB_BACKLOG_MASK;
			pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
					RTE_DISTRIB_FLAG_BITS));
		}
		/* recursive call.
		 * Note that the tags were set before first level call
		 * to rte_distributor_process.
		 */
		rte_distributor_process(d, pkts, i);
		bl->count = bl->start = 0;
	}
}
/* this function is called when process() fn is called without any new
 * packets. It goes through all the workers and clears any returned packets
 * to do a partial flush.
 */
static int
process_returns(struct rte_distributor *d)
{
	unsigned wkr;
	unsigned flushed = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;

	for (wkr = 0; wkr < d->num_workers; wkr++) {

		const int64_t data = d->bufs[wkr].bufptr64;
		uintptr_t oldbuf = 0;

		if (data & RTE_DISTRIB_GET_BUF) {
			flushed++;
			if (d->backlog[wkr].count)
				d->bufs[wkr].bufptr64 =
						backlog_pop(&d->backlog[wkr]);
			else {
				d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
				d->in_flight_tags[wkr] = 0;
				d->in_flight_bitmask &= ~(1UL << wkr);
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		store_return(oldbuf, d, &ret_start, &ret_count);
	}

	d->returns.start = ret_start;
	d->returns.count = ret_count;

	return flushed;
}
/* process a set of packets to distribute them to workers */
int
rte_distributor_process(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned num_mbufs)
{
	unsigned next_idx = 0;
	unsigned wkr = 0;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint32_t new_tag = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;

	if (unlikely(num_mbufs == 0))
		return process_returns(d);
	while (next_idx < num_mbufs || next_mb != NULL) {

		int64_t data = d->bufs[wkr].bufptr64;
		uintptr_t oldbuf = 0;

		if (!next_mb) {
			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb)
					<< RTE_DISTRIB_FLAG_BITS);
			/*
			 * The user is advised to set the tag value for each
			 * mbuf before calling rte_distributor_process.
			 * User-defined tags are used to identify flows,
			 * or sessions.
			 */
			new_tag = next_mb->hash.usr;

			/*
			 * Note that if RTE_DISTRIB_MAX_WORKERS is larger
			 * than 64 then the size of match has to be expanded.
			 */
			uint64_t match = 0;
			unsigned i;
			/*
			 * to scan for a match use "xor" and "not" to get a 0/1
			 * value, then use shifting to merge to single "match"
			 * variable, where a one-bit indicates a match for the
			 * worker given by the bit-position
			 */
			for (i = 0; i < d->num_workers; i++)
				match |= ((uint64_t)!(d->in_flight_tags[i] ^
					new_tag) << i);

			/* Only turned-on bits are considered as match */
			match &= d->in_flight_bitmask;
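
			/* Worked example (hypothetical state): if workers 0
			 * and 2 are both in-flight with tag 5 and new_tag is
			 * 5, the loop above sets match = 0x5, and
			 * __builtin_ctzl() below picks worker 0, the lowest
			 * matching bit; the packet joins that worker's
			 * backlog, preserving per-flow ordering. */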
			if (match) {
				next_mb = NULL;
				unsigned worker = __builtin_ctzl(match);
				if (add_to_backlog(&d->backlog[worker],
						next_value) < 0)
					next_idx--;
			}
		}

		if ((data & RTE_DISTRIB_GET_BUF) &&
				(d->backlog[wkr].count || next_mb)) {

			if (d->backlog[wkr].count)
				d->bufs[wkr].bufptr64 =
						backlog_pop(&d->backlog[wkr]);

			else {
				d->bufs[wkr].bufptr64 = next_value;
				d->in_flight_tags[wkr] = new_tag;
				d->in_flight_bitmask |= (1UL << wkr);
				next_mb = NULL;
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		/* store returns in a circular buffer */
		store_return(oldbuf, d, &ret_start, &ret_count);

		if (++wkr == d->num_workers)
			wkr = 0;
	}
	/* to finish, check all workers for backlog and schedule work for them
	 * if they are ready */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		if (d->backlog[wkr].count &&
				(d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {

			int64_t oldbuf = d->bufs[wkr].bufptr64 >>
					RTE_DISTRIB_FLAG_BITS;
			store_return(oldbuf, d, &ret_start, &ret_count);

			d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
		}

	d->returns.start = ret_start;
	d->returns.count = ret_count;

	return num_mbufs;
}
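
/*
 * On the distributor core, rte_distributor_process() is typically driven
 * from a polling loop; a rough sketch, with rx_burst() standing in for
 * whatever feeds packets and BURST for its size:
 *
 *	struct rte_mbuf *bufs[BURST];
 *	for (;;) {
 *		unsigned n = rx_burst(bufs, BURST);
 *		rte_distributor_process(d, bufs, n);
 *		n = rte_distributor_returned_pkts(d, bufs, BURST);
 *		... forward or free the n completed packets ...
 *	}
 */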
/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned i;

	for (i = 0; i < retval; i++) {
		unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}
/* return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog. */
static inline unsigned
total_outstanding(const struct rte_distributor *d)
{
	unsigned wkr, total_outstanding;

	total_outstanding = __builtin_popcountl(d->in_flight_bitmask);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count;

	return total_outstanding;
}
/* flush the distributor, so that there are no outstanding packets in flight
 * or queued up. */
int
rte_distributor_flush(struct rte_distributor *d)
{
	const unsigned flushed = total_outstanding(d);

	while (total_outstanding(d) > 0)
		rte_distributor_process(d, NULL, 0);

	return flushed;
}
/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns(struct rte_distributor *d)
{
	d->returns.start = d->returns.count = 0;
#ifndef __OPTIMIZE__
	memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
#endif
}
/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create(const char *name,
		unsigned socket_id,
		unsigned num_workers)
{
	struct rte_distributor *d;
	struct rte_distributor_list *distributor_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
	RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
				sizeof(d->in_flight_bitmask) * CHAR_BIT);

	if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* check that we have an initialised tail queue */
	distributor_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_DISTRIBUTOR,
				rte_distributor_list);
	if (distributor_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	snprintf(d->name, sizeof(d->name), "%s", name);
	d->num_workers = num_workers;

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_INSERT_TAIL(distributor_list, d, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return d;
}
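
/*
 * Creation is then a one-liner on the initialising core; an illustrative
 * call, assuming four worker lcores on the caller's NUMA socket:
 *
 *	struct rte_distributor *d =
 *		rte_distributor_create("pkt_dist", rte_socket_id(), 4);
 */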