/*-
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_DISTRIBUTE_H_
#define _RTE_DISTRIBUTE_H_
/**
 * @file
 * The distributor is a component which is designed to pass packets
 * one-at-a-time to workers, with dynamic load balancing.
 */
#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */

struct rte_distributor;
struct rte_mbuf;
/**
 * Function to create a new distributor instance
 *
 * Reserves the memory needed for the distributor operation and
 * initializes the distributor to work with the configured number of workers.
 *
 * @param name
 *   The name to be given to the distributor instance.
 * @param socket_id
 *   The NUMA node on which the memory is to be allocated
 * @param num_workers
 *   The maximum number of workers that will request packets from this
 *   distributor
 * @return
 *   The newly created distributor instance
 */
struct rte_distributor *
rte_distributor_create(const char *name, unsigned socket_id,
		unsigned num_workers);
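
/*
 * Usage sketch (illustrative only): create a distributor sized for four
 * worker lcores on the caller's NUMA socket. The instance name "dist_example"
 * and the error handling are assumptions, not requirements of the API;
 * rte_socket_id() and rte_exit() come from rte_lcore.h and rte_debug.h.
 *
 *	struct rte_distributor *d;
 *
 *	d = rte_distributor_create("dist_example", rte_socket_id(), 4);
 *	if (d == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create distributor\n");
 */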
/*  *** APIs to be called on the distributor lcore ***  */
/**
 * The following APIs are the public APIs which are designed for use on a
 * single lcore which acts as the distributor lcore for a given distributor
 * instance. These functions cannot be called on multiple cores simultaneously
 * without using locking to protect access to the internals of the distributor.
 *
 * NOTE: a given lcore cannot act as both a distributor lcore and a worker
 * lcore for the same distributor instance, otherwise deadlock will result.
 */
/**
 * Process a set of packets by distributing them among workers that request
 * packets. The distributor will ensure that no two packets that have the
 * same flow id, or tag, in the mbuf will be processed at the same time.
 *
 * The user is advised to set the tag for each mbuf before calling this
 * function. If the tag is not set, its value is undefined and depends on the
 * driver implementation and configuration.
 *
 * This is not multi-thread safe and should only be called on a single lcore.
 *
 * @param d
 *   The distributor instance to be used
 * @param mbufs
 *   The mbufs to be distributed
 * @param num_mbufs
 *   The number of mbufs in the mbufs array
 * @return
 *   The number of mbufs processed.
 */
int
rte_distributor_process(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned num_mbufs);
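
/*
 * Usage sketch (illustrative, distributor lcore): hand a burst of received
 * packets to the workers. The port/queue numbers, BURST_SIZE and the use of
 * rte_eth_rx_burst() are assumptions about the surrounding application; the
 * flow tag is assumed to have been set in each mbuf beforehand.
 *
 *	struct rte_mbuf *bufs[BURST_SIZE];
 *	const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
 *
 *	if (nb_rx > 0)
 *		rte_distributor_process(d, bufs, nb_rx);
 */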
/**
 * Get a set of mbufs that have been returned to the distributor by workers
 *
 * This should only be called on the same lcore as rte_distributor_process()
 *
 * @param d
 *   The distributor instance to be used
 * @param mbufs
 *   The mbufs pointer array to be filled in
 * @param max_mbufs
 *   The size of the mbufs array
 * @return
 *   The number of mbufs returned in the mbufs array.
 */
int
rte_distributor_returned_pkts(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned max_mbufs);
/**
 * Flush the distributor component, so that there are no in-flight or
 * backlogged packets awaiting processing
 *
 * This should only be called on the same lcore as rte_distributor_process()
 *
 * @param d
 *   The distributor instance to be used
 * @return
 *   The number of queued/in-flight packets that were completed by this call.
 */
int
rte_distributor_flush(struct rte_distributor *d);
/**
 * Clears the array of returned packets used as the source for the
 * rte_distributor_returned_pkts() API call.
 *
 * This should only be called on the same lcore as rte_distributor_process()
 *
 * @param d
 *   The distributor instance to be used
 */
void
rte_distributor_clear_returns(struct rte_distributor *d);
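
/*
 * Usage sketch (illustrative, distributor lcore): drain the distributor at
 * shutdown. Flushing completes the in-flight and backlogged packets, which
 * can then be collected with rte_distributor_returned_pkts(); freeing them
 * and the BURST_SIZE bound are assumptions of this sketch.
 *
 *	struct rte_mbuf *done[BURST_SIZE];
 *	int i, n;
 *
 *	rte_distributor_flush(d);
 *	while ((n = rte_distributor_returned_pkts(d, done, BURST_SIZE)) > 0)
 *		for (i = 0; i < n; i++)
 *			rte_pktmbuf_free(done[i]);
 *	rte_distributor_clear_returns(d);
 */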
/*  *** APIs to be called on the worker lcores ***  */
/**
 * The following APIs are the public APIs which are designed for use on
 * multiple lcores which act as workers for a distributor. Each lcore should
 * use a unique worker id when requesting packets.
 *
 * NOTE: a given lcore cannot act as both a distributor lcore and a worker
 * lcore for the same distributor instance, otherwise deadlock will result.
 */
/**
 * API called by a worker to get a new packet to process. Any previous packet
 * given to the worker is assumed to have completed processing, and may be
 * optionally returned to the distributor via the oldpkt parameter.
 *
 * @param d
 *   The distributor instance to be used
 * @param worker_id
 *   The worker instance number to use - must be less than num_workers passed
 *   at distributor creation time.
 * @param oldpkt
 *   The previous packet, if any, being processed by the worker
 * @return
 *   A new packet to be processed by the worker thread.
 */
struct rte_mbuf *
rte_distributor_get_pkt(struct rte_distributor *d,
		unsigned worker_id, struct rte_mbuf *oldpkt);
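
/*
 * Usage sketch (illustrative, worker lcore): the typical run-to-completion
 * worker loop. handle_packet(), quit_signal and worker_id are placeholders
 * assumed to be provided by the application. Passing NULL as oldpkt on the
 * first call indicates there is no previous packet to hand back.
 *
 *	struct rte_mbuf *pkt = NULL;
 *
 *	while (!quit_signal) {
 *		pkt = rte_distributor_get_pkt(d, worker_id, pkt);
 *		handle_packet(pkt);
 *	}
 */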
/**
 * API called by a worker to return a completed packet without requesting a
 * new packet, for example, because a worker thread is shutting down
 *
 * @param d
 *   The distributor instance to be used
 * @param worker_id
 *   The worker instance number to use - must be less than num_workers passed
 *   at distributor creation time.
 * @param mbuf
 *   The previous packet being processed by the worker
 */
int
rte_distributor_return_pkt(struct rte_distributor *d, unsigned worker_id,
		struct rte_mbuf *mbuf);
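
/*
 * Usage sketch (illustrative, worker lcore): hand back the last packet when
 * the worker loop sketched above exits, without requesting further work.
 * "pkt" is the mbuf pointer carried through that loop.
 *
 *	if (pkt != NULL)
 *		rte_distributor_return_pkt(d, worker_id, pkt);
 */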
/**
 * API called by a worker to request a new packet to process.
 * Any previous packet given to the worker is assumed to have completed
 * processing, and may be optionally returned to the distributor via
 * the oldpkt parameter.
 * Unlike rte_distributor_get_pkt(), this function does not wait for a new
 * packet to be provided by the distributor.
 *
 * NOTE: after calling this function, rte_distributor_poll_pkt() should
 * be used to poll for the packet requested. The rte_distributor_get_pkt()
 * API should *not* be used to try and retrieve the new packet.
 *
 * @param d
 *   The distributor instance to be used
 * @param worker_id
 *   The worker instance number to use - must be less than num_workers passed
 *   at distributor creation time.
 * @param oldpkt
 *   The previous packet, if any, being processed by the worker
 */
void
rte_distributor_request_pkt(struct rte_distributor *d,
		unsigned worker_id, struct rte_mbuf *oldpkt);
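
/*
 * Usage sketch (illustrative, worker lcore): the non-blocking variant of the
 * worker loop. The next packet is requested up front and then polled for, so
 * the worker can interleave other work while waiting; do_other_work() is a
 * placeholder for that work, and oldpkt is the previously completed packet
 * (or NULL).
 *
 *	struct rte_mbuf *pkt;
 *
 *	rte_distributor_request_pkt(d, worker_id, oldpkt);
 *	while ((pkt = rte_distributor_poll_pkt(d, worker_id)) == NULL)
 *		do_other_work();
 */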
/**
 * API called by a worker to check for a new packet that was previously
 * requested by a call to rte_distributor_request_pkt(). It does not wait
 * for the new packet to be available, but returns NULL if the request has
 * not yet been fulfilled by the distributor.
 *
 * @param d
 *   The distributor instance to be used
 * @param worker_id
 *   The worker instance number to use - must be less than num_workers passed
 *   at distributor creation time.
 * @return
 *   A new packet to be processed by the worker thread, or NULL if no
 *   packet is yet available.
 */
struct rte_mbuf *
rte_distributor_poll_pkt(struct rte_distributor *d,