/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_function_versioning.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>

#include "rte_distributor_single.h"
#include "distributor_private.h"
TAILQ_HEAD(rte_distributor_list, rte_distributor_single);

static struct rte_tailq_elem rte_distributor_tailq = {
	.name = "RTE_DISTRIBUTOR",
};
EAL_REGISTER_TAILQ(rte_distributor_tailq)
/**** APIs called by workers ****/
void
rte_distributor_request_pkt_single(struct rte_distributor_single *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
	int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_GET_BUF;

	/* Spin until any previous request or return has been consumed. */
	while (unlikely(__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED)
			& RTE_DISTRIB_FLAGS_MASK))
		rte_pause();

	/* Sync with distributor on GET_BUF flag. */
	__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
}
struct rte_mbuf *
rte_distributor_poll_pkt_single(struct rte_distributor_single *d,
		unsigned worker_id)
{
	union rte_distributor_buffer_single *buf = &d->bufs[worker_id];

	/* Sync with distributor. Acquire bufptr64. */
	if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF)
		return NULL;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
	return (struct rte_mbuf *)((uintptr_t)ret);
}
struct rte_mbuf *
rte_distributor_get_pkt_single(struct rte_distributor_single *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	struct rte_mbuf *ret;

	rte_distributor_request_pkt_single(d, worker_id, oldpkt);
	while ((ret = rte_distributor_poll_pkt_single(d, worker_id)) == NULL)
		rte_pause();
	return ret;
}
int
rte_distributor_return_pkt_single(struct rte_distributor_single *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_RETURN_BUF;

	/* Sync with distributor on RETURN_BUF flag. */
	__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
	return 0;
}
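/*
 * Illustrative sketch of a worker loop built on the worker-side calls above;
 * worker_id, quit and handle_packet() are placeholders, not part of this
 * library.
 *
 *	struct rte_mbuf *pkt = NULL;
 *
 *	while (!quit) {
 *		pkt = rte_distributor_get_pkt_single(d, worker_id, pkt);
 *		handle_packet(pkt);	// application-defined work
 *	}
 *	// hand the last packet back before the worker stops polling
 *	rte_distributor_return_pkt_single(d, worker_id, pkt);
 */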
/**** APIs called on distributor core ****/

/* as the name suggests, adds a packet to the backlog for a particular worker */
static int
add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
{
	if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
		return -1;

	bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
			= item;
	return 0;
}

/* takes the next packet for a worker off the backlog */
static int64_t
backlog_pop(struct rte_distributor_backlog *bl)
{
	bl->count--;
	return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
}
/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor_single *d,
		unsigned *ret_start, unsigned *ret_count)
{
	/* store returns in a circular buffer - code is branch-free */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
}
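/*
 * Worked example of the branch-free update in store_return() above: when
 * oldbuf is zero, !!(oldbuf) is 0, so neither *ret_start nor *ret_count
 * changes and the slot just written is simply overwritten on a later call.
 * When oldbuf is non-zero and the ring is full
 * (*ret_count == RTE_DISTRIB_RETURNS_MASK), *ret_start advances instead of
 * *ret_count, dropping the oldest entry; otherwise only *ret_count grows.
 */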
static inline void
handle_worker_shutdown(struct rte_distributor_single *d, unsigned int wkr)
{
	d->in_flight_tags[wkr] = 0;
	d->in_flight_bitmask &= ~(1UL << wkr);

	/* Sync with worker. Release bufptr64. */
	__atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);

	if (unlikely(d->backlog[wkr].count != 0)) {
		/* On return of a packet, we need to move the
		 * queued packets for this core elsewhere.
		 * Easiest solution is to set things up for
		 * a recursive call. That will cause those
		 * packets to be queued up for the next free
		 * core, i.e. it will return as soon as a
		 * core becomes free to accept the first
		 * packet, as subsequent ones will be added to
		 * the backlog for that core.
		 */
		struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
		unsigned i;
		struct rte_distributor_backlog *bl = &d->backlog[wkr];

		for (i = 0; i < bl->count; i++) {
			unsigned idx = (bl->start + i) &
					RTE_DISTRIB_BACKLOG_MASK;
			pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
					RTE_DISTRIB_FLAG_BITS));
		}
		/* recursive call.
		 * Note that the tags were set before the first-level call
		 * to rte_distributor_process.
		 */
		rte_distributor_process_single(d, pkts, i);
		bl->count = bl->start = 0;
	}
}
/* this function is called when process() is called without any new packets.
 * It goes through all the workers and collects any returned packets in order
 * to do a partial flush.
 */
static int
process_returns(struct rte_distributor_single *d)
{
	unsigned wkr;
	unsigned flushed = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;

	for (wkr = 0; wkr < d->num_workers; wkr++) {
		uintptr_t oldbuf = 0;
		/* Sync with worker. Acquire bufptr64. */
		const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
							__ATOMIC_ACQUIRE);

		if (data & RTE_DISTRIB_GET_BUF) {
			flushed++;
			if (d->backlog[wkr].count)
				/* Sync with worker. Release bufptr64. */
				__atomic_store_n(&(d->bufs[wkr].bufptr64),
					backlog_pop(&d->backlog[wkr]),
					__ATOMIC_RELEASE);
			else {
				/* Sync with worker on GET_BUF flag. */
				__atomic_store_n(&(d->bufs[wkr].bufptr64),
					RTE_DISTRIB_GET_BUF,
					__ATOMIC_RELEASE);
				d->in_flight_tags[wkr] = 0;
				d->in_flight_bitmask &= ~(1UL << wkr);
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		store_return(oldbuf, d, &ret_start, &ret_count);
	}

	d->returns.start = ret_start;
	d->returns.count = ret_count;

	return flushed;
}
/* process a set of packets to distribute them to workers */
int
rte_distributor_process_single(struct rte_distributor_single *d,
		struct rte_mbuf **mbufs, unsigned num_mbufs)
{
	unsigned next_idx = 0;
	unsigned wkr = 0;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint32_t new_tag = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;

	if (unlikely(num_mbufs == 0))
		return process_returns(d);
	while (next_idx < num_mbufs || next_mb != NULL) {
		uintptr_t oldbuf = 0;
		/* Sync with worker. Acquire bufptr64. */
		int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
						__ATOMIC_ACQUIRE);

		if (next_mb == NULL) {
			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb)
					<< RTE_DISTRIB_FLAG_BITS);
			/*
			 * The user is advised to set the tag value for each
			 * mbuf before calling rte_distributor_process.
			 * User-defined tags are used to identify flows
			 * or sessions.
			 */
			new_tag = next_mb->hash.usr;

			/*
			 * Note that if RTE_DISTRIB_MAX_WORKERS is larger
			 * than 64 then the size of match has to be expanded.
			 */
			uint64_t match = 0;
			unsigned i;
			/*
			 * to scan for a match use "xor" and "not" to get a 0/1
			 * value, then use shifting to merge into a single
			 * "match" variable, where a one-bit indicates a match
			 * for the worker given by the bit position
			 */
			for (i = 0; i < d->num_workers; i++)
				match |= ((uint64_t)!(d->in_flight_tags[i]
					^ new_tag) << i);

			/* Only turned-on bits are considered a match */
			match &= d->in_flight_bitmask;
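			/*
			 * Worked example (illustrative): with workers 0-3
			 * holding in-flight tags {5, 7, 5, 9} and
			 * new_tag == 5, the loop above yields
			 * match == 0b0101; masking with in_flight_bitmask
			 * keeps only workers that really hold a packet, and
			 * __builtin_ctzl() below then picks the lowest
			 * matching worker (worker 0).
			 */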
			if (match) {
				next_mb = NULL;
				unsigned worker = __builtin_ctzl(match);
				if (add_to_backlog(&d->backlog[worker],
						next_value) < 0)
					next_idx--;
			}
		}
		if ((data & RTE_DISTRIB_GET_BUF) &&
				(d->backlog[wkr].count || next_mb)) {

			if (d->backlog[wkr].count)
				/* Sync with worker. Release bufptr64. */
				__atomic_store_n(&(d->bufs[wkr].bufptr64),
						backlog_pop(&d->backlog[wkr]),
						__ATOMIC_RELEASE);
			else {
				/* Sync with worker. Release bufptr64. */
				__atomic_store_n(&(d->bufs[wkr].bufptr64),
						next_value,
						__ATOMIC_RELEASE);
				d->in_flight_tags[wkr] = new_tag;
				d->in_flight_bitmask |= (1UL << wkr);
				next_mb = NULL;
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		/* store returns in a circular buffer */
		store_return(oldbuf, d, &ret_start, &ret_count);

		if (++wkr == d->num_workers)
			wkr = 0;
	}
	/* to finish, check all workers for backlog and schedule work for them
	 * if they are ready
	 */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		if (d->backlog[wkr].count &&
				/* Sync with worker. Acquire bufptr64. */
				(__atomic_load_n(&(d->bufs[wkr].bufptr64),
				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {

			int64_t oldbuf = d->bufs[wkr].bufptr64 >>
					RTE_DISTRIB_FLAG_BITS;

			store_return(oldbuf, d, &ret_start, &ret_count);

			/* Sync with worker. Release bufptr64. */
			__atomic_store_n(&(d->bufs[wkr].bufptr64),
				backlog_pop(&d->backlog[wkr]),
				__ATOMIC_RELEASE);
		}

	d->returns.start = ret_start;
	d->returns.count = ret_count;

	return num_mbufs;
}
/* return to the caller the packets returned from workers */
int
rte_distributor_returned_pkts_single(struct rte_distributor_single *d,
		struct rte_mbuf **mbufs, unsigned max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned i;

	for (i = 0; i < retval; i++) {
		unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;
	return retval;
}
/* return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned
total_outstanding(const struct rte_distributor_single *d)
{
	unsigned wkr, total_outstanding;

	total_outstanding = __builtin_popcountl(d->in_flight_bitmask);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count;

	return total_outstanding;
}
/* flush the distributor, so that there are no outstanding packets in flight
 * or queued up. */
int
rte_distributor_flush_single(struct rte_distributor_single *d)
{
	const unsigned flushed = total_outstanding(d);

	while (total_outstanding(d) > 0)
		rte_distributor_process_single(d, NULL, 0);

	return flushed;
}
/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns_single(struct rte_distributor_single *d)
{
	d->returns.start = d->returns.count = 0;
	memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
}
/* creates a distributor instance */
struct rte_distributor_single *
rte_distributor_create_single(const char *name,
		unsigned socket_id,
		unsigned num_workers)
{
	struct rte_distributor_single *d;
	struct rte_distributor_list *distributor_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
	RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
				sizeof(d->in_flight_bitmask) * CHAR_BIT);

	if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	strlcpy(d->name, name, sizeof(d->name));
	d->num_workers = num_workers;

	distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
					  rte_distributor_list);

	rte_mcfg_tailq_write_lock();
	TAILQ_INSERT_TAIL(distributor_list, d, next);
	rte_mcfg_tailq_write_unlock();

	return d;
}
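/*
 * Illustrative sketch of the distributor-core side of this API; BURST_SIZE,
 * port, quit, num_workers and the surrounding setup are placeholders, not
 * part of this library.
 *
 *	struct rte_mbuf *bufs[BURST_SIZE], *done[BURST_SIZE];
 *	uint16_t nb_rx;
 *	int nb_done;
 *
 *	struct rte_distributor_single *d =
 *		rte_distributor_create_single("example", rte_socket_id(),
 *				num_workers);
 *
 *	while (!quit) {
 *		nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
 *		// tags must already be set in each mbuf's hash.usr field
 *		rte_distributor_process_single(d, bufs, nb_rx);
 *		nb_done = rte_distributor_returned_pkts_single(d, done,
 *				BURST_SIZE);
 *		// transmit or free the nb_done packets in done[]
 *	}
 *	rte_distributor_flush_single(d);
 *	rte_distributor_clear_returns_single(d);
 */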