4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_common.h>
35 #include <rte_cycles.h>
36 #include <rte_pipeline.h>
38 #include "pipeline_common_be.h"
/*
 * Receive one message pointer from ring r via a single-consumer dequeue.
 * NOTE(review): this extract is truncated — the return type, the declaration
 * of `msg`, the status check and the return statement(s) are missing from
 * view; presumably NULL is returned when the ring is empty (status != 0) —
 * confirm against the original source.
 */
43 thread_msg_recv(struct rte_ring *r)
46 int status = rte_ring_sc_dequeue(r, &msg);
/*
 * Send message msg on ring r via a single-producer enqueue.
 * NOTE(review): extract is truncated — the second parameter line, the
 * declaration of `status` and the opening `do {` are missing from view.
 */
55 thread_msg_send(struct rte_ring *r,
/* Busy-retry while the ring is full: keep enqueuing until the call stops
 * returning -ENOBUFS. */
61 status = rte_ring_sp_enqueue(r, msg);
62 } while (status == -ENOBUFS);
/*
 * Enable a pipeline on thread t as described by req. A request with
 * f_run == NULL is stored in t->regular[] (run through rte_pipeline_run);
 * one with a custom run callback is stored in t->custom[].
 * NOTE(review): extract is truncated — return statements for the capacity
 * checks, the error/success return values, the counter increments
 * (presumably t->n_regular++ / t->n_custom++) and closing braces are
 * missing from view; confirm against the original source.
 */
66 thread_pipeline_enable(struct app_thread_data *t,
67 struct thread_pipeline_enable_msg_req *req)
69 struct app_thread_pipeline_data *p;
/* Reject the request when the destination array is already full. */
71 if (req->f_run == NULL) {
72 if (t->n_regular >= APP_MAX_THREAD_PIPELINES)
75 if (t->n_custom >= APP_MAX_THREAD_PIPELINES)
/* Pick the next free slot: regular[] for f_run == NULL, custom[] otherwise. */
79 p = (req->f_run == NULL) ?
80 &t->regular[t->n_regular] :
81 &t->custom[t->n_custom];
/* Copy the pipeline description from the request into the slot. */
83 p->pipeline_id = req->pipeline_id;
85 p->f_run = req->f_run;
86 p->f_timer = req->f_timer;
87 p->timer_period = req->timer_period;
/* Bump the matching counter (increment lines not visible in this extract). */
90 if (req->f_run == NULL)
/*
 * Disable (remove) the pipeline identified by req->pipeline_id from thread t.
 * Searches regular[] first, then custom[]; on a match the remaining entries
 * are shifted down with memcpy to keep the array compact.
 * NOTE(review): extract is truncated — `continue` statements after the id
 * mismatch checks, the memcpy source arguments (presumably &t->regular[i+1] /
 * &t->custom[i+1]), the decrements producing the new counts, the return
 * statements and closing braces are missing from view; confirm against the
 * original source.
 */
99 thread_pipeline_disable(struct app_thread_data *t,
100 struct thread_pipeline_disable_msg_req *req)
/* Clamp counts to array capacity so the loops below cannot run past the
 * end of regular[] / custom[] even if the stored counts are corrupt. */
102 uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
103 uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
106 /* search regular pipelines of current thread */
107 for (i = 0; i < n_regular; i++) {
108 if (t->regular[i].pipeline_id != req->pipeline_id)
/* Found: close the gap by shifting the tail of the array down one slot
 * (only needed when the match is not the last element). */
111 if (i < n_regular - 1)
112 memcpy(&t->regular[i],
114 (n_regular - 1 - i) * sizeof(struct app_thread_pipeline_data));
117 t->n_regular = n_regular;
122 /* search custom pipelines of current thread */
123 for (i = 0; i < n_custom; i++) {
124 if (t->custom[i].pipeline_id != req->pipeline_id)
127 if (i < n_custom - 1)
128 memcpy(&t->custom[i],
130 (n_custom - 1 - i) * sizeof(struct app_thread_pipeline_data));
133 t->n_custom = n_custom;
138 /* return if pipeline not found */
/*
 * Drain and service one message request from the thread's input queue:
 * dequeue a request from t->msgq_in, dispatch on its type, and send the
 * response (with the handler's status) on t->msgq_out.
 * NOTE(review): extract is truncated — the declaration/initialization of
 * msg_ptr, the NULL check, the req/rsp assignments (req and rsp presumably
 * alias the same dequeued buffer), the switch header, break statements,
 * default case and closing braces are missing from view; confirm against
 * the original source.
 */
143 thread_msg_req_handle(struct app_thread_data *t)
146 struct thread_msg_req *req;
147 struct thread_msg_rsp *rsp;
/* Non-blocking receive from the thread's inbound message ring. */
149 msg_ptr = thread_msg_recv(t->msgq_in);
155 case THREAD_MSG_REQ_PIPELINE_ENABLE: {
/* Enable request: run the handler and report its status back. */
156 rsp->status = thread_pipeline_enable(t,
157 (struct thread_pipeline_enable_msg_req *) req);
158 thread_msg_send(t->msgq_out, rsp);
162 case THREAD_MSG_REQ_PIPELINE_DISABLE: {
/* Disable request: run the handler and report its status back. */
163 rsp->status = thread_pipeline_disable(t,
164 (struct thread_pipeline_disable_msg_req *) req);
165 thread_msg_send(t->msgq_out, rsp);
/*
 * Per-lcore worker entry point: looks up this core's thread data and loops
 * forever running its pipelines. Each iteration runs all regular pipelines
 * (via rte_pipeline_run) and all custom pipelines (via their f_run callback).
 * Every 16th iteration ((i & 0xF) == 0) it checks TSC-based deadlines to
 * fire per-pipeline timer callbacks and to poll the thread message queue,
 * then records the earliest upcoming deadline in t->deadline.
 * NOTE(review): extract is truncated — the outer for(;;) loop header, the
 * array index expressions feeding `data` in the timer loops (presumably
 * &t->regular[j] / &t->custom[j]), several closing braces and the final
 * return are missing from view; confirm against the original source.
 */
176 app_thread(void *arg)
178 struct app_params *app = (struct app_params *) arg;
179 uint32_t core_id = rte_lcore_id(), i, j;
180 struct app_thread_data *t = &app->thread_data[core_id];
/* Clamp counts to array capacity so the loops below stay in bounds. */
183 uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
184 uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
186 /* Run regular pipelines */
187 for (j = 0; j < n_regular; j++) {
188 struct app_thread_pipeline_data *data = &t->regular[j];
189 struct pipeline *p = data->be;
191 rte_pipeline_run(p->p);
194 /* Run custom pipelines */
195 for (j = 0; j < n_custom; j++) {
196 struct app_thread_pipeline_data *data = &t->custom[j];
198 data->f_run(data->be);
/* Timer management: only every 16th iteration to keep the fast path lean. */
202 if ((i & 0xF) == 0) {
203 uint64_t time = rte_get_tsc_cycles();
204 uint64_t t_deadline = UINT64_MAX;
/* Nothing due yet — skip the timer scan entirely.
 * (the `continue`/skip statement is not visible in this extract). */
206 if (time < t->deadline)
209 /* Timer for regular pipelines */
210 for (j = 0; j < n_regular; j++) {
211 struct app_thread_pipeline_data *data =
213 uint64_t p_deadline = data->deadline;
/* Fire the pipeline's timer callback and re-arm its deadline. */
215 if (p_deadline <= time) {
216 data->f_timer(data->be);
217 p_deadline = time + data->timer_period;
218 data->deadline = p_deadline;
/* Track the earliest deadline across all pipelines. */
221 if (p_deadline < t_deadline)
222 t_deadline = p_deadline;
225 /* Timer for custom pipelines */
226 for (j = 0; j < n_custom; j++) {
227 struct app_thread_pipeline_data *data =
229 uint64_t p_deadline = data->deadline;
231 if (p_deadline <= time) {
232 data->f_timer(data->be);
233 p_deadline = time + data->timer_period;
234 data->deadline = p_deadline;
237 if (p_deadline < t_deadline)
238 t_deadline = p_deadline;
241 /* Timer for thread message request */
243 uint64_t deadline = t->thread_req_deadline;
/* Poll the message queue when its deadline expires, then re-arm it. */
245 if (deadline <= time) {
246 thread_msg_req_handle(t);
247 deadline = time + t->timer_period;
248 t->thread_req_deadline = deadline;
251 if (deadline < t_deadline)
252 t_deadline = deadline;
/* Publish the earliest upcoming deadline for the next fast-path check. */
255 t->deadline = t_deadline;