/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>		/* snprintf() */
#include <string.h>		/* memset() */
#include <limits.h>		/* CHAR_BIT */
#include <sys/queue.h>
#include <rte_mbuf.h>		/* struct rte_mbuf, hash.usr */
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>		/* rte_pause() */
#include "rte_distributor_v20.h"
#include "rte_distributor_private.h"
TAILQ_HEAD(rte_distributor_list, rte_distributor_v20);

static struct rte_tailq_elem rte_distributor_tailq = {
	.name = "RTE_DISTRIBUTOR",
};
EAL_REGISTER_TAILQ(rte_distributor_tailq)
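
/*
 * Worker/distributor handshake: each worker owns a single 64-bit mailbox
 * word, bufs[worker_id].bufptr64.  The mbuf pointer is carried in the
 * upper bits, while the low RTE_DISTRIB_FLAG_BITS bits carry the
 * handshake flags RTE_DISTRIB_GET_BUF and RTE_DISTRIB_RETURN_BUF, so one
 * word exchanges both a packet and a request.
 */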
/**** APIs called by workers ****/
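
/*
 * Typical worker loop (illustrative sketch only, not part of this file;
 * "running", "worker_id" and app_handle_packet() are placeholders):
 *
 *	struct rte_mbuf *pkt =
 *		rte_distributor_get_pkt_v20(d, worker_id, NULL);
 *	while (running) {
 *		app_handle_packet(pkt);
 *		pkt = rte_distributor_get_pkt_v20(d, worker_id, pkt);
 *	}
 *	rte_distributor_return_pkt_v20(d, worker_id, pkt);
 */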
void
rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
	int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_GET_BUF;
	/* wait until the distributor has consumed any outstanding flag */
	while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
		rte_pause();
	buf->bufptr64 = req;
}
struct rte_mbuf *
rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id)
{
	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
	/* our request is still pending: no packet available yet */
	if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
		return NULL;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
	return (struct rte_mbuf *)((uintptr_t)ret);
}
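
/*
 * Note: there are no explicit memory barriers in this handshake; it
 * relies on bufptr64 being declared volatile (see
 * rte_distributor_private.h) and on a strongly-ordered memory model
 * such as x86.
 */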
struct rte_mbuf *
rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	struct rte_mbuf *ret;
	rte_distributor_request_pkt_v20(d, worker_id, oldpkt);
	while ((ret = rte_distributor_poll_pkt_v20(d, worker_id)) == NULL)
		rte_pause();
	return ret;
}
int
rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_RETURN_BUF;
	buf->bufptr64 = req;
	return 0;
}
/**** APIs called on distributor core ****/
/* as name suggests, adds a packet to the backlog for a particular worker */
static int
add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
{
	if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
		return -1;

	bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
			= item;
	return 0;
}

/* takes the next packet for a worker off the backlog */
static int64_t
backlog_pop(struct rte_distributor_backlog *bl)
{
	bl->count--;
	return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
}
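
/*
 * The backlog is a small power-of-two ring, so the index arithmetic
 * above wraps with a simple mask (RTE_DISTRIB_BACKLOG_MASK ==
 * RTE_DISTRIB_BACKLOG_SIZE - 1, per rte_distributor_private.h).
 */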
/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor_v20 *d,
		unsigned *ret_start, unsigned *ret_count)
{
	/* store returns in a circular buffer - code is branch-free */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
}
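
/*
 * In store_return() above, !!(oldbuf) is 0 when there is nothing to
 * store and 1 otherwise, so both index updates become no-ops for an
 * empty slot without branching.  When the ring is full
 * (*ret_count == RTE_DISTRIB_RETURNS_MASK), the new entry is still
 * written and *ret_start advances instead of *ret_count, dropping the
 * oldest entry.
 */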
static inline void
handle_worker_shutdown(struct rte_distributor_v20 *d, unsigned int wkr)
{
	d->in_flight_tags[wkr] = 0;
	d->in_flight_bitmask &= ~(1UL << wkr);
	d->bufs[wkr].bufptr64 = 0;
	if (unlikely(d->backlog[wkr].count != 0)) {
		/* On return of a packet, we need to move the
		 * queued packets for this core elsewhere.
		 * Easiest solution is to set things up for
		 * a recursive call. That will cause those
		 * packets to be queued up for the next free
		 * core, i.e. it will return as soon as a
		 * core becomes free to accept the first
		 * packet, as subsequent ones will be added to
		 * the backlog for that core.
		 */
		struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
		unsigned i;
		struct rte_distributor_backlog *bl = &d->backlog[wkr];

		for (i = 0; i < bl->count; i++) {
			unsigned idx = (bl->start + i) &
					RTE_DISTRIB_BACKLOG_MASK;
			pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
					RTE_DISTRIB_FLAG_BITS));
		}
		/* recursive call.
		 * Note that the tags were set before first level call
		 * to rte_distributor_process.
		 */
		rte_distributor_process_v20(d, pkts, i);
		bl->count = bl->start = 0;
	}
}
/* this function is called when process() fn is called without any new
 * packets. It goes through all the workers and clears any returned packets
 * to do a partial flush.
 */
static int
process_returns(struct rte_distributor_v20 *d)
{
	unsigned wkr;
	unsigned flushed = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;
	for (wkr = 0; wkr < d->num_workers; wkr++) {

		const int64_t data = d->bufs[wkr].bufptr64;
		uintptr_t oldbuf = 0;

		if (data & RTE_DISTRIB_GET_BUF) {
			flushed++;
			if (d->backlog[wkr].count)
				d->bufs[wkr].bufptr64 =
						backlog_pop(&d->backlog[wkr]);
			else {
				d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
				d->in_flight_tags[wkr] = 0;
				d->in_flight_bitmask &= ~(1UL << wkr);
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		store_return(oldbuf, d, &ret_start, &ret_count);
	}

	d->returns.start = ret_start;
	d->returns.count = ret_count;

	return flushed;
}
/* process a set of packets to distribute them to workers */
int
rte_distributor_process_v20(struct rte_distributor_v20 *d,
		struct rte_mbuf **mbufs, unsigned num_mbufs)
{
	unsigned next_idx = 0;
	unsigned wkr = 0;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint32_t new_tag = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;

	/* with no new packets, just do a partial flush of returns */
	if (unlikely(num_mbufs == 0))
		return process_returns(d);
	while (next_idx < num_mbufs || next_mb != NULL) {

		int64_t data = d->bufs[wkr].bufptr64;
		uintptr_t oldbuf = 0;

		if (!next_mb) {
			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb)
					<< RTE_DISTRIB_FLAG_BITS);
			/*
			 * The user is advised to set the tag value for
			 * each mbuf before calling rte_distributor_process.
			 * User-defined tags are used to identify flows,
			 * or sessions.
			 */
			new_tag = next_mb->hash.usr;
			/*
			 * Note that if RTE_DISTRIB_MAX_WORKERS is larger
			 * than 64 then the size of match has to be expanded.
			 */
			uint64_t match = 0;
			unsigned i;
			/*
			 * to scan for a match use "xor" and "not" to get
			 * a 0/1 value, then use shifting to merge to a
			 * single "match" variable, where a one-bit
			 * indicates a match for the worker given by the
			 * bit-position
			 */
			for (i = 0; i < d->num_workers; i++)
				match |= (!(d->in_flight_tags[i] ^ new_tag)
					<< i);

			/* Only turned-on bits are considered as match */
			match &= d->in_flight_bitmask;

			if (match) {
				next_mb = NULL;
				unsigned worker = __builtin_ctzl(match);
				if (add_to_backlog(&d->backlog[worker],
						next_value) < 0)
					next_idx--;
			}
		}
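
		/*
		 * Example of the match logic above: if only worker 2 is
		 * active with in_flight_tags[2] == new_tag, then
		 * match == 1 << 2 and __builtin_ctzl(match) selects
		 * worker 2.
		 */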
		if ((data & RTE_DISTRIB_GET_BUF) &&
				(d->backlog[wkr].count || next_mb)) {

			if (d->backlog[wkr].count)
				d->bufs[wkr].bufptr64 =
						backlog_pop(&d->backlog[wkr]);
			else {
				d->bufs[wkr].bufptr64 = next_value;
				d->in_flight_tags[wkr] = new_tag;
				d->in_flight_bitmask |= (1UL << wkr);
				next_mb = NULL;
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		/* store returns in a circular buffer */
		store_return(oldbuf, d, &ret_start, &ret_count);

		if (++wkr == d->num_workers)
			wkr = 0;
	}
	/* to finish, check all workers for backlog and schedule work for
	 * them if they are ready */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		if (d->backlog[wkr].count &&
				(d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {

			int64_t oldbuf = d->bufs[wkr].bufptr64 >>
					RTE_DISTRIB_FLAG_BITS;
			store_return(oldbuf, d, &ret_start, &ret_count);

			d->bufs[wkr].bufptr64 =
					backlog_pop(&d->backlog[wkr]);
		}

	d->returns.start = ret_start;
	d->returns.count = ret_count;
	return num_mbufs;
}
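
/*
 * Typical distributor-core loop (illustrative sketch only; "running",
 * "port" and BURST_SIZE are placeholders, and each mbuf's hash.usr tag
 * is assumed to be set before the process() call):
 *
 *	struct rte_mbuf *bufs[BURST_SIZE];
 *	while (running) {
 *		const uint16_t n = rte_eth_rx_burst(port, 0, bufs,
 *				BURST_SIZE);
 *		rte_distributor_process_v20(d, bufs, n);
 *	}
 */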
/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
		struct rte_mbuf **mbufs, unsigned max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned i;

	for (i = 0; i < retval; i++) {
		unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}
/* return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog. */
static inline unsigned
total_outstanding(const struct rte_distributor_v20 *d)
{
	unsigned wkr, total_outstanding;

	total_outstanding = __builtin_popcountl(d->in_flight_bitmask);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count;

	return total_outstanding;
}
/* flush the distributor, so that there are no outstanding packets in flight
 * or queued up. */
int
rte_distributor_flush_v20(struct rte_distributor_v20 *d)
{
	const unsigned flushed = total_outstanding(d);

	while (total_outstanding(d) > 0)
		rte_distributor_process_v20(d, NULL, 0);

	return flushed;
}
/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d)
{
	d->returns.start = d->returns.count = 0;
	memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
}
/* creates a distributor instance */
struct rte_distributor_v20 *
rte_distributor_create_v20(const char *name,
		unsigned socket_id,
		unsigned num_workers)
{
	struct rte_distributor_v20 *d;
	struct rte_distributor_list *distributor_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	/* compile-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
	RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
				sizeof(d->in_flight_bitmask) * CHAR_BIT);

	if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
		rte_errno = EINVAL;
		return NULL;
	}
	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	snprintf(d->name, sizeof(d->name), "%s", name);
	d->num_workers = num_workers;

	distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
					  rte_distributor_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_INSERT_TAIL(distributor_list, d, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return d;
}
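
/*
 * Example call (illustrative sketch only; the name and worker count are
 * placeholders):
 *
 *	struct rte_distributor_v20 *d =
 *		rte_distributor_create_v20("pkt_dist", rte_socket_id(), 4);
 *	if (d == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create distributor\n");
 */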