1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
22 #ifndef THREAD_PIPELINES_MAX
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
31 #define THREAD_TIMER_PERIOD_MS 100
35 * Master thread: data plane thread context
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
50 struct rte_table_action *a;
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
81 * Master thread: data plane thread init
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
159 * Master thread & data plane threads: message passing
161 enum thread_req_type {
165 struct thread_msg_req {
166 enum thread_req_type type;
169 struct thread_msg_rsp {
174 * Data plane threads: message handling
176 static inline struct thread_msg_req *
177 thread_msg_recv(struct rte_ring *msgq_req)
179 struct thread_msg_req *req;
181 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
190 thread_msg_send(struct rte_ring *msgq_rsp,
191 struct thread_msg_rsp *rsp)
196 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
197 } while (status == -ENOBUFS);
201 thread_msg_handle(struct thread_data *t)
204 struct thread_msg_req *req;
205 struct thread_msg_rsp *rsp;
207 req = thread_msg_recv(t->msgq_req);
213 rsp = (struct thread_msg_rsp *) req;
217 thread_msg_send(t->msgq_rsp, rsp);
222 * Master thread & data plane threads: message passing
225 enum pipeline_req_type {
229 struct pipeline_msg_req {
230 enum pipeline_req_type type;
233 struct pipeline_msg_rsp {
238 * Data plane threads: message handling
240 static inline struct pipeline_msg_req *
241 pipeline_msg_recv(struct rte_ring *msgq_req)
243 struct pipeline_msg_req *req;
245 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
254 pipeline_msg_send(struct rte_ring *msgq_rsp,
255 struct pipeline_msg_rsp *rsp)
260 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
261 } while (status == -ENOBUFS);
265 pipeline_msg_handle(struct pipeline_data *p)
268 struct pipeline_msg_req *req;
269 struct pipeline_msg_rsp *rsp;
271 req = pipeline_msg_recv(p->msgq_req);
277 rsp = (struct pipeline_msg_rsp *) req;
281 pipeline_msg_send(p->msgq_rsp, rsp);
286 * Data plane threads: main
289 thread_main(void *arg __rte_unused)
291 struct thread_data *t;
292 uint32_t thread_id, i;
294 thread_id = rte_lcore_id();
295 t = &thread_data[thread_id];
302 for (j = 0; j < t->n_pipelines; j++)
303 rte_pipeline_run(t->p[j]);
306 if ((i & 0xF) == 0) {
307 uint64_t time = rte_get_tsc_cycles();
308 uint64_t time_next_min = UINT64_MAX;
310 if (time < t->time_next_min)
313 /* Pipeline message queues */
314 for (j = 0; j < t->n_pipelines; j++) {
315 struct pipeline_data *p =
316 &t->pipeline_data[j];
317 uint64_t time_next = p->time_next;
319 if (time_next <= time) {
320 pipeline_msg_handle(p);
321 rte_pipeline_flush(p->p);
322 time_next = time + p->timer_period;
323 p->time_next = time_next;
326 if (time_next < time_next_min)
327 time_next_min = time_next;
330 /* Thread message queues */
332 uint64_t time_next = t->time_next;
334 if (time_next <= time) {
335 thread_msg_handle(t);
336 time_next = time + t->timer_period;
337 t->time_next = time_next;
340 if (time_next < time_next_min)
341 time_next_min = time_next;
344 t->time_next_min = time_next_min;