/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #ifndef _RTE_DISTRIBUTE_H_
35 #define _RTE_DISTRIBUTE_H_
41 * The distributor is a component which is designed to pass packets
42 * one-at-a-time to workers, with dynamic load balancing.
51 #define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
53 struct rte_distributor;
/**
 * Function to create a new distributor instance
 *
 * Reserves the memory needed for the distributor operation and
 * initializes the distributor to work with the configured number of workers.
 *
 * @param name
 *   The name to be given to the distributor instance.
 * @param socket_id
 *   The NUMA node on which the memory is to be allocated
 * @param num_workers
 *   The maximum number of workers that will request packets from this
 *   distributor
 * @return
 *   The newly created distributor instance
 */
struct rte_distributor *
rte_distributor_create(const char *name, unsigned socket_id,
		unsigned num_workers);
/* *** APIS to be called on the distributor lcore *** */
/*
 * The following APIs are the public APIs which are designed for use on a
 * single lcore which acts as the distributor lcore for a given distributor
 * instance. These functions cannot be called on multiple cores simultaneously
 * without using locking to protect access to the internals of the distributor.
 *
 * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
 * for the same distributor instance, otherwise deadlock will result.
 */
/**
 * Process a set of packets by distributing them among workers that request
 * packets. The distributor will ensure that no two packets that have the
 * same flow id, or tag, in the mbuf will be processed at the same time.
 *
 * The user is advised to set the tag for each mbuf before calling this
 * function. If the user does not set the tag, the tag value can be various
 * values depending on driver implementation and configuration.
 *
 * This is not multi-thread safe and should only be called on a single lcore.
 *
 * @param d
 *   The distributor instance to be used
 * @param mbufs
 *   The mbufs to be distributed
 * @param num_mbufs
 *   The number of mbufs in the mbufs array
 * @return
 *   The number of mbufs processed.
 */
int
rte_distributor_process(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned num_mbufs);
/**
 * Get a set of mbufs that have been returned to the distributor by workers
 *
 * This should only be called on the same lcore as rte_distributor_process()
 *
 * @param d
 *   The distributor instance to be used
 * @param mbufs
 *   The mbufs pointer array to be filled in
 * @param max_mbufs
 *   The size of the mbufs array
 * @return
 *   The number of mbufs returned in the mbufs array.
 */
int
rte_distributor_returned_pkts(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned max_mbufs);
/**
 * Flush the distributor component, so that there are no in-flight or
 * backlogged packets awaiting processing
 *
 * This should only be called on the same lcore as rte_distributor_process()
 *
 * @param d
 *   The distributor instance to be used
 * @return
 *   The number of queued/in-flight packets that were completed by this call.
 */
int
rte_distributor_flush(struct rte_distributor *d);
/**
 * Clears the array of returned packets used as the source for the
 * rte_distributor_returned_pkts() API call.
 *
 * This should only be called on the same lcore as rte_distributor_process()
 *
 * @param d
 *   The distributor instance to be used
 */
void
rte_distributor_clear_returns(struct rte_distributor *d);
/* *** APIS to be called on the worker lcores *** */
/*
 * The following APIs are the public APIs which are designed for use on
 * multiple lcores which act as workers for a distributor. Each lcore should use
 * a unique worker id when requesting packets.
 *
 * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
 * for the same distributor instance, otherwise deadlock will result.
 */
/**
 * API called by a worker to get a new packet to process. Any previous packet
 * given to the worker is assumed to have completed processing, and may be
 * optionally returned to the distributor via the oldpkt parameter.
 *
 * @param d
 *   The distributor instance to be used
 * @param worker_id
 *   The worker instance number to use - must be less than num_workers passed
 *   at distributor creation time.
 * @param oldpkt
 *   The previous packet, if any, being processed by the worker
 * @return
 *   A new packet to be processed by the worker thread.
 */
struct rte_mbuf *
rte_distributor_get_pkt(struct rte_distributor *d,
		unsigned worker_id, struct rte_mbuf *oldpkt);
/**
 * API called by a worker to return a completed packet without requesting a
 * new packet, for example, because a worker thread is shutting down
 *
 * @param d
 *   The distributor instance to be used
 * @param worker_id
 *   The worker instance number to use - must be less than num_workers passed
 *   at distributor creation time.
 * @param mbuf
 *   The previous packet being processed by the worker
 */
int
rte_distributor_return_pkt(struct rte_distributor *d, unsigned worker_id,
		struct rte_mbuf *mbuf);
/**
 * API called by a worker to request a new packet to process.
 * Any previous packet given to the worker is assumed to have completed
 * processing, and may be optionally returned to the distributor via
 * the oldpkt parameter.
 * Unlike rte_distributor_get_pkt(), this function does not wait for a new
 * packet to be provided by the distributor.
 *
 * NOTE: after calling this function, rte_distributor_poll_pkt() should
 * be used to poll for the packet requested. The rte_distributor_get_pkt()
 * API should *not* be used to try and retrieve the new packet.
 *
 * @param d
 *   The distributor instance to be used
 * @param worker_id
 *   The worker instance number to use - must be less than num_workers passed
 *   at distributor creation time.
 * @param oldpkt
 *   The previous packet, if any, being processed by the worker
 */
void
rte_distributor_request_pkt(struct rte_distributor *d,
		unsigned worker_id, struct rte_mbuf *oldpkt);
/**
 * API called by a worker to check for a new packet that was previously
 * requested by a call to rte_distributor_request_pkt(). It does not wait
 * for the new packet to be available, but returns NULL if the request has
 * not yet been fulfilled by the distributor.
 *
 * @param d
 *   The distributor instance to be used
 * @param worker_id
 *   The worker instance number to use - must be less than num_workers passed
 *   at distributor creation time.
 * @return
 *   A new packet to be processed by the worker thread, or NULL if no
 *   packet is yet available.
 */
struct rte_mbuf *
rte_distributor_poll_pkt(struct rte_distributor *d,
		unsigned worker_id);