/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
#include <rte_compat.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>

#include "rte_distributor_private.h"
#include "rte_distributor.h"
#include "rte_distributor_v20.h"
#include "rte_distributor_v1705.h"
TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
	.name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)
/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/
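/*
 * A short summary of the handshake used by the burst APIs below (the
 * authoritative flag definitions live in rte_distributor_private.h):
 * each 64-bit slot in bufptr64[]/retptr64[] carries an mbuf pointer in
 * its upper bits, shifted up by RTE_DISTRIB_FLAG_BITS, with flags such
 * as RTE_DISTRIB_GET_BUF, RTE_DISTRIB_RETURN_BUF and
 * RTE_DISTRIB_VALID_BUF in the low bits. The GET_BUF flag in slot 0
 * hands ownership of the whole cache line back and forth between a
 * worker and the distributor core.
 */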
void
rte_distributor_request_pkt_v1705(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt,
		unsigned int count)
{
	struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
	unsigned int i;

	volatile int64_t *retptr64;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		rte_distributor_request_pkt_v20(d->d_v20,
			worker_id, oldpkt[0]);
		return;
	}

	retptr64 = &(buf->retptr64[0]);
	/* Spin while handshake bits are set (scheduler clears it) */
	while (unlikely(*retptr64 & RTE_DISTRIB_GET_BUF)) {
		rte_pause();
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/*
	 * OK, if we've got here, then the scheduler has just cleared the
	 * handshake bits. Populate the retptrs with returning packets.
	 */

	for (i = count; i < RTE_DIST_BURST_SIZE; i++)
		buf->retptr64[i] = 0;

	/* Set Return bit for each packet returned */
	for (i = count; i-- > 0; )
		buf->retptr64[i] =
			(((int64_t)(uintptr_t)(oldpkt[i])) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

	/*
	 * Finally, set the GET_BUF flag to signal to the distributor that
	 * this cache line is ready for processing.
	 */
	*retptr64 |= RTE_DISTRIB_GET_BUF;
}
BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt,
		unsigned int count),
		rte_distributor_request_pkt_v1705);

int
rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	uint64_t ret;
	int count = 0;
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		pkts[0] = rte_distributor_poll_pkt_v20(d->d_v20, worker_id);
		return (pkts[0]) ? 1 : 0;
	}

	/* If the handshake bit is still set, the burst is not ready yet */
	if (buf->bufptr64[0] & RTE_DISTRIB_GET_BUF)
		return -1;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
		if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
			ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
			pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
		}
	}

	/*
	 * Now that we've got the contents of the cache line into an array of
	 * mbuf pointers, toggle the bit so the scheduler can start working
	 * on the next cache line while we're processing these packets.
	 */
	buf->bufptr64[0] |= RTE_DISTRIB_GET_BUF;

	return count;
}
BIND_DEFAULT_SYMBOL(rte_distributor_poll_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_poll_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts),
		rte_distributor_poll_pkt_v1705);

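/*
 * Illustrative non-blocking worker pattern built on the request/poll
 * pair above; a sketch only, where do_other_work() is a hypothetical
 * placeholder and oldpkt/count are the packets being handed back:
 *
 *	struct rte_mbuf *pkts[RTE_DIST_BURST_SIZE];
 *	int n;
 *
 *	rte_distributor_request_pkt(d, worker_id, oldpkt, count);
 *	while ((n = rte_distributor_poll_pkt(d, worker_id, pkts)) == -1)
 *		do_other_work();
 */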
int
rte_distributor_get_pkt_v1705(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts,
		struct rte_mbuf **oldpkt, unsigned int return_count)
{
	int count;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (return_count <= 1) {
			pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
				worker_id, oldpkt[0]);
			return (pkts[0]) ? 1 : 0;
		} else
			return -EINVAL;
	}

	rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);

	count = rte_distributor_poll_pkt(d, worker_id, pkts);
	while (count == -1) {
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();

		count = rte_distributor_poll_pkt(d, worker_id, pkts);
	}
	return count;
}
BIND_DEFAULT_SYMBOL(rte_distributor_get_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_get_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts,
		struct rte_mbuf **oldpkt, unsigned int return_count),
		rte_distributor_get_pkt_v1705);

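/*
 * Illustrative blocking worker loop built on rte_distributor_get_pkt();
 * a sketch only, where worker_handle_packet() is a hypothetical
 * placeholder. Handing the same array in as both pkts and oldpkt
 * returns the previous burst while fetching the next one:
 *
 *	struct rte_mbuf *bufs[RTE_DIST_BURST_SIZE * 2];
 *	unsigned int k, num = 0;
 *
 *	for (;;) {
 *		num = rte_distributor_get_pkt(d, worker_id, bufs, bufs, num);
 *		for (k = 0; k < num; k++)
 *			worker_handle_packet(bufs[k]);
 *	}
 */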
int
rte_distributor_return_pkt_v1705(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (num == 1)
			return rte_distributor_return_pkt_v20(d->d_v20,
				worker_id, oldpkt[0]);
		else
			return -EINVAL;
	}

	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
		/* Switch off the return bit first */
		buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;

	for (i = num; i-- > 0; )
		buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

	/* set the GET_BUF bit even if we got no returns */
	buf->retptr64[0] |= RTE_DISTRIB_GET_BUF;

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_distributor_return_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_return_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt, int num),
		rte_distributor_return_pkt_v1705);

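/*
 * Illustrative worker shutdown: instead of requesting more work, a
 * worker that is about to exit can hand back whatever it still holds.
 * A sketch only, reusing the bufs/num names from the loop above:
 *
 *	rte_distributor_return_pkt(d, worker_id, bufs, num);
 */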
/**** APIs called on distributor core ***/

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
		unsigned int *ret_start, unsigned int *ret_count)
{
	if (!oldbuf)
		return;
	/* store returns in a circular buffer */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}
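/*
 * The two branchless updates above implement a "drop the oldest when
 * full" policy: while *ret_count is below RTE_DISTRIB_RETURNS_MASK the
 * count grows and the start index stays put; once the count reaches the
 * mask, each new return advances the start index instead, so the oldest
 * entry falls out of the window. For example, assuming a mask of 127,
 * with start == 5 and count == 127 a new mbuf is stored at index
 * (5 + 127) & 127 == 4 and start becomes 6.
 */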
/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each worker
 * backlog). This will then allow us to pin those packets to the relevant
 * workers to give us our atomic flow pinning.
 */
void
find_match_scalar(struct rte_distributor *d,
		uint16_t *data_ptr,
		uint16_t *output_ptr)
{
	struct rte_distributor_backlog *bl;
	uint16_t i, j, w;

	/*
	 * Function overview:
	 * 1. Loop through all worker IDs
	 * 2. Compare the current inflights to the incoming tags
	 * 3. Compare the current backlog to the incoming tags
	 * 4. Add any matches to the output
	 */

	for (j = 0 ; j < RTE_DIST_BURST_SIZE; j++)
		output_ptr[j] = 0;

	for (i = 0; i < d->num_workers; i++) {
		bl = &d->backlog[i];

		for (j = 0; j < RTE_DIST_BURST_SIZE ; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (d->in_flight_tags[i][j] == data_ptr[w]) {
					/* output is indexed by incoming pkt */
					output_ptr[w] = i+1;
					break;
				}
		for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (bl->tags[j] == data_ptr[w]) {
					output_ptr[w] = i+1;
					break;
				}
	}

	/*
	 * At this stage, the output contains RTE_DIST_BURST_SIZE 16-bit
	 * values, with each non-zero value holding the ID (+1) of the
	 * worker to which the corresponding flow is pinned.
	 */
}
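/*
 * For example: if incoming tag data_ptr[2] is currently inflight on (or
 * backlogged for) worker 0, output_ptr[2] is set to 0+1 == 1. Entries
 * still at zero carry no pinning constraint and may be assigned to any
 * worker by the caller.
 */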
/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	uintptr_t oldbuf;
	unsigned int ret_start = d->returns.start,
			ret_count = d->returns.count;
	unsigned int count = 0;
	unsigned int i;

	if (buf->retptr64[0] & RTE_DISTRIB_GET_BUF) {
		for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
			if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
				oldbuf = ((uintptr_t)(buf->retptr64[i] >>
					RTE_DISTRIB_FLAG_BITS));
				/* store returns in a circular buffer */
				store_return(oldbuf, d, &ret_start, &ret_count);
				count++;
				buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
			}
		}
		d->returns.start = ret_start;
		d->returns.count = ret_count;
		/* Clear for the worker to populate with more returns */
		buf->retptr64[0] = 0;
	}
	return count;
}
/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cache line is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	unsigned int i;

	while (!(d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF))
		rte_pause();

	handle_returns(d, wkr);

	buf->count = 0;

	for (i = 0; i < d->backlog[wkr].count; i++) {
		d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
				RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
		d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
	}
	buf->count = i;
	for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
		buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
		d->in_flight_tags[wkr][i] = 0;
	}

	d->backlog[wkr].count = 0;

	/* Clear the GET bit */
	buf->bufptr64[0] &= ~RTE_DISTRIB_GET_BUF;
	return buf->count;
}
/* process a set of packets to distribute them to workers */
int
rte_distributor_process_v1705(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
	unsigned int next_idx = 0;
	static unsigned int wkr;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint16_t new_tag = 0;
	uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
	unsigned int i, j, w, wid;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
	}

	if (unlikely(num_mbufs == 0)) {
		/* Flush out all non-full cache-lines to workers. */
		for (wid = 0 ; wid < d->num_workers; wid++) {
			if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF)) {
				release(d, wid);
				handle_returns(d, wid);
			}
		}
		return 0;
	}

	while (next_idx < num_mbufs) {
		uint16_t matches[RTE_DIST_BURST_SIZE];
		unsigned int pkts;

		if (d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF)
			d->bufs[wkr].count = 0;

		if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
			pkts = num_mbufs - next_idx;
		else
			pkts = RTE_DIST_BURST_SIZE;
		for (i = 0; i < pkts; i++) {
			if (mbufs[next_idx + i]) {
				/* flows have to be non-zero */
				flows[i] = mbufs[next_idx + i]->hash.usr | 1;
			} else
				flows[i] = 0;
		}
		for (; i < RTE_DIST_BURST_SIZE; i++)
			flows[i] = 0;

		switch (d->dist_match_fn) {
		case RTE_DIST_MATCH_VECTOR:
			find_match_vec(d, &flows[0], &matches[0]);
			break;
		default:
			find_match_scalar(d, &flows[0], &matches[0]);
		}

		/*
		 * The matches array now contains the intended worker ID (+1)
		 * for each incoming packet. Any zeroes need to be assigned
		 * workers.
		 */

		for (j = 0; j < pkts; j++) {

			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb) <<
					RTE_DISTRIB_FLAG_BITS);
			/*
			 * Users are advised to set the tag value for each
			 * mbuf before calling rte_distributor_process.
			 * User-defined tags are used to identify flows,
			 * or sessions.
			 */
			/* flows MUST be non-zero */
			new_tag = (uint16_t)(next_mb->hash.usr) | 1;

			/*
			 * Uncommenting the next line will cause the find_match
			 * function to be optimised out, making this function
			 * do parallel (non-atomic) distribution
			 */
			/* matches[j] = 0; */
			if (matches[j]) {
				struct rte_distributor_backlog *bl =
						&d->backlog[matches[j]-1];
				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, matches[j]-1);
				}

				/* Add to worker that already has flow */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;

			} else {
				struct rte_distributor_backlog *bl =
						&d->backlog[wkr];
				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, wkr);
				}

				/* Add to the current worker */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;
				/*
				 * Now that we've just added an unpinned flow
				 * to a worker, we need to ensure that all
				 * other packets with that same flow will go
				 * to the same worker in this burst.
				 */
				for (w = j; w < pkts; w++)
					if (flows[w] == new_tag)
						matches[w] = wkr+1;
			}
		}
		wkr++;
		if (wkr >= d->num_workers)
			wkr = 0;
	}
	/* Flush out all non-full cache-lines to workers. */
	for (wid = 0 ; wid < d->num_workers; wid++)
		if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF))
			release(d, wid);

	return num_mbufs;
}
BIND_DEFAULT_SYMBOL(rte_distributor_process, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_process(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int num_mbufs),
		rte_distributor_process_v1705);

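/*
 * Illustrative distributor-core loop pairing process() with
 * returned_pkts(); a sketch only, where rx_burst_somehow() and
 * tx_returned() are hypothetical placeholders for the application's
 * own I/O:
 *
 *	struct rte_mbuf *in[RTE_DIST_BURST_SIZE];
 *	struct rte_mbuf *out[RTE_DIST_BURST_SIZE * 2];
 *	unsigned int n, r;
 *
 *	for (;;) {
 *		n = rx_burst_somehow(in, RTE_DIST_BURST_SIZE);
 *		rte_distributor_process(d, in, n);
 *		r = rte_distributor_returned_pkts(d, out, RTE_DIM(out));
 *		tx_returned(out, r);
 *	}
 */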
/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned int retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned int i;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_returned_pkts_v20(d->d_v20,
				mbufs, max_mbufs);
	}

	for (i = 0; i < retval; i++) {
		unsigned int idx = (returns->start + i) &
				RTE_DISTRIB_RETURNS_MASK;

		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}
BIND_DEFAULT_SYMBOL(rte_distributor_returned_pkts, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int max_mbufs),
		rte_distributor_returned_pkts_v1705);

/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
	unsigned int wkr, total_outstanding = 0;

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count;

	return total_outstanding;
}
/*
 * Flush the distributor, so that there are no outstanding packets in flight or
 * queued up.
 */
int
rte_distributor_flush_v1705(struct rte_distributor *d)
{
	unsigned int flushed;
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_flush_v20(d->d_v20);
	}

	flushed = total_outstanding(d);

	while (total_outstanding(d) > 0)
		rte_distributor_process(d, NULL, 0);

	/*
	 * Send empty burst to all workers to allow them to exit
	 * gracefully, should they need to.
	 */
	rte_distributor_process(d, NULL, 0);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		handle_returns(d, wkr);

	return flushed;
}
BIND_DEFAULT_SYMBOL(rte_distributor_flush, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_flush(struct rte_distributor *d),
		rte_distributor_flush_v1705);

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns_v1705(struct rte_distributor *d)
{
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		rte_distributor_clear_returns_v20(d->d_v20);
		return;
	}

	/* throw away returns, so workers can exit */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		d->bufs[wkr].retptr64[0] = 0;
}
BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
		rte_distributor_clear_returns_v1705);

/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create_v1705(const char *name,
		unsigned int socket_id,
		unsigned int num_workers,
		unsigned int alg_type)
{
	struct rte_distributor *d;
	struct rte_dist_burst_list *dist_burst_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	unsigned int i;

	/* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

	if (alg_type == RTE_DIST_ALG_SINGLE) {
		d = malloc(sizeof(struct rte_distributor));
		if (d == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
		d->d_v20 = rte_distributor_create_v20(name,
				socket_id, num_workers);
		if (d->d_v20 == NULL) {
			free(d);
			/* rte_errno will have been set */
			return NULL;
		}
		d->alg_type = alg_type;
		return d;
	}
	if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	snprintf(d->name, sizeof(d->name), "%s", name);
	d->num_workers = num_workers;
	d->alg_type = alg_type;

	d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
	d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif

	/*
	 * Set up the backlog tags so they're pointing at the second cache
	 * line for performance during flow matching
	 */
	for (i = 0 ; i < num_workers ; i++)
		d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

	dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
					  rte_dist_burst_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_INSERT_TAIL(dist_burst_list, d, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return d;
}
BIND_DEFAULT_SYMBOL(rte_distributor_create, _v1705, 17.05);
MAP_STATIC_SYMBOL(struct rte_distributor *rte_distributor_create(
		const char *name, unsigned int socket_id,
		unsigned int num_workers, unsigned int alg_type),
		rte_distributor_create_v1705);
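
/*
 * Illustrative creation call (a sketch only; the name "pkt_dist" and the
 * worker count are arbitrary, and error handling is minimal):
 *
 *	struct rte_distributor *dist = rte_distributor_create("pkt_dist",
 *			rte_socket_id(), 4, RTE_DIST_ALG_BURST);
 *	if (dist == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create distributor: %s\n",
 *				rte_strerror(rte_errno));
 */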