/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */
#include <stdlib.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_ring.h>

#include "rte_eth_softnic_internals.h"
14 * Master thread: data plane thread init
17 softnic_thread_free(struct pmd_internals *softnic)
21 RTE_LCORE_FOREACH_SLAVE(i) {
22 struct softnic_thread *t = &softnic->thread[i];
26 rte_ring_free(t->msgq_req);
29 rte_ring_free(t->msgq_rsp);
34 softnic_thread_init(struct pmd_internals *softnic)
38 RTE_LCORE_FOREACH_SLAVE(i) {
39 char ring_name[NAME_MAX];
40 struct rte_ring *msgq_req, *msgq_rsp;
41 struct softnic_thread *t = &softnic->thread[i];
42 struct softnic_thread_data *t_data = &softnic->thread_data[i];
43 uint32_t cpu_id = rte_lcore_to_socket_id(i);
46 snprintf(ring_name, sizeof(ring_name), "%s-TH%u-REQ",
50 msgq_req = rte_ring_create(ring_name,
53 RING_F_SP_ENQ | RING_F_SC_DEQ);
55 if (msgq_req == NULL) {
56 softnic_thread_free(softnic);
60 snprintf(ring_name, sizeof(ring_name), "%s-TH%u-RSP",
64 msgq_rsp = rte_ring_create(ring_name,
67 RING_F_SP_ENQ | RING_F_SC_DEQ);
69 if (msgq_rsp == NULL) {
70 softnic_thread_free(softnic);
74 /* Master thread records */
75 t->msgq_req = msgq_req;
76 t->msgq_rsp = msgq_rsp;
79 /* Data plane thread records */
80 t_data->n_pipelines = 0;
81 t_data->msgq_req = msgq_req;
82 t_data->msgq_rsp = msgq_rsp;
83 t_data->timer_period =
84 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
85 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
86 t_data->time_next_min = t_data->time_next;
93 thread_is_running(uint32_t thread_id)
95 enum rte_lcore_state_t thread_state;
97 thread_state = rte_eal_get_lcore_state(thread_id);
98 return (thread_state == RUNNING)? 1 : 0;
102 * Pipeline is running when:
103 * (A) Pipeline is mapped to a data plane thread AND
104 * (B) Its data plane thread is in RUNNING state.
107 pipeline_is_running(struct pipeline *p)
112 return thread_is_running(p->thread_id);
116 * Master thread & data plane threads: message passing
118 enum thread_req_type {
122 struct thread_msg_req {
123 enum thread_req_type type;
126 struct thread_msg_rsp {
131 * Data plane threads: message handling
133 static inline struct thread_msg_req *
134 thread_msg_recv(struct rte_ring *msgq_req)
136 struct thread_msg_req *req;
138 int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
147 thread_msg_send(struct rte_ring *msgq_rsp,
148 struct thread_msg_rsp *rsp)
153 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
154 } while (status == -ENOBUFS);
158 thread_msg_handle(struct softnic_thread_data *t)
161 struct thread_msg_req *req;
162 struct thread_msg_rsp *rsp;
164 req = thread_msg_recv(t->msgq_req);
170 rsp = (struct thread_msg_rsp *)req;
174 thread_msg_send(t->msgq_rsp, rsp);
179 * Master thread & data plane threads: message passing
181 enum pipeline_req_type {
183 PIPELINE_REQ_PORT_IN_ENABLE,
184 PIPELINE_REQ_PORT_IN_DISABLE,
189 struct pipeline_msg_req {
190 enum pipeline_req_type type;
191 uint32_t id; /* Port IN, port OUT or table ID */
194 struct pipeline_msg_rsp {
201 static struct pipeline_msg_req *
202 pipeline_msg_alloc(void)
204 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
205 sizeof(struct pipeline_msg_rsp));
207 return calloc(1, size);
211 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
216 static struct pipeline_msg_rsp *
217 pipeline_msg_send_recv(struct pipeline *p,
218 struct pipeline_msg_req *req)
220 struct rte_ring *msgq_req = p->msgq_req;
221 struct rte_ring *msgq_rsp = p->msgq_rsp;
222 struct pipeline_msg_rsp *rsp;
227 status = rte_ring_sp_enqueue(msgq_req, req);
228 } while (status == -ENOBUFS);
232 status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
233 } while (status != 0);
239 softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
240 const char *pipeline_name,
244 struct pipeline_msg_req *req;
245 struct pipeline_msg_rsp *rsp;
248 /* Check input params */
249 if (pipeline_name == NULL)
252 p = softnic_pipeline_find(softnic, pipeline_name);
254 port_id >= p->n_ports_in)
257 if (!pipeline_is_running(p)) {
258 status = rte_pipeline_port_in_enable(p->p, port_id);
262 /* Allocate request */
263 req = pipeline_msg_alloc();
268 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
271 /* Send request and wait for response */
272 rsp = pipeline_msg_send_recv(p, req);
277 status = rsp->status;
280 pipeline_msg_free(rsp);
286 softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
287 const char *pipeline_name,
291 struct pipeline_msg_req *req;
292 struct pipeline_msg_rsp *rsp;
295 /* Check input params */
296 if (pipeline_name == NULL)
299 p = softnic_pipeline_find(softnic, pipeline_name);
301 port_id >= p->n_ports_in)
304 if (!pipeline_is_running(p)) {
305 status = rte_pipeline_port_in_disable(p->p, port_id);
309 /* Allocate request */
310 req = pipeline_msg_alloc();
315 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
318 /* Send request and wait for response */
319 rsp = pipeline_msg_send_recv(p, req);
324 status = rsp->status;
327 pipeline_msg_free(rsp);
333 * Data plane threads: message handling
335 static inline struct pipeline_msg_req *
336 pipeline_msg_recv(struct rte_ring *msgq_req)
338 struct pipeline_msg_req *req;
340 int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
349 pipeline_msg_send(struct rte_ring *msgq_rsp,
350 struct pipeline_msg_rsp *rsp)
355 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
356 } while (status == -ENOBUFS);
359 static struct pipeline_msg_rsp *
360 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
361 struct pipeline_msg_req *req)
363 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
364 uint32_t port_id = req->id;
366 rsp->status = rte_pipeline_port_in_enable(p->p,
372 static struct pipeline_msg_rsp *
373 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
374 struct pipeline_msg_req *req)
376 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
377 uint32_t port_id = req->id;
379 rsp->status = rte_pipeline_port_in_disable(p->p,
386 pipeline_msg_handle(struct pipeline_data *p)
389 struct pipeline_msg_req *req;
390 struct pipeline_msg_rsp *rsp;
392 req = pipeline_msg_recv(p->msgq_req);
397 case PIPELINE_REQ_PORT_IN_ENABLE:
398 rsp = pipeline_msg_handle_port_in_enable(p, req);
401 case PIPELINE_REQ_PORT_IN_DISABLE:
402 rsp = pipeline_msg_handle_port_in_disable(p, req);
406 rsp = (struct pipeline_msg_rsp *)req;
410 pipeline_msg_send(p->msgq_rsp, rsp);
415 * Data plane threads: main
418 rte_pmd_softnic_run(uint16_t port_id)
420 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
421 struct pmd_internals *softnic;
422 struct softnic_thread_data *t;
423 uint32_t thread_id, j;
425 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
426 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
429 softnic = dev->data->dev_private;
430 thread_id = rte_lcore_id();
431 t = &softnic->thread_data[thread_id];
435 for (j = 0; j < t->n_pipelines; j++)
436 rte_pipeline_run(t->p[j]);
439 if ((t->iter & 0xFLLU) == 0) {
440 uint64_t time = rte_get_tsc_cycles();
441 uint64_t time_next_min = UINT64_MAX;
443 if (time < t->time_next_min)
446 /* Pipeline message queues */
447 for (j = 0; j < t->n_pipelines; j++) {
448 struct pipeline_data *p =
449 &t->pipeline_data[j];
450 uint64_t time_next = p->time_next;
452 if (time_next <= time) {
453 pipeline_msg_handle(p);
454 rte_pipeline_flush(p->p);
455 time_next = time + p->timer_period;
456 p->time_next = time_next;
459 if (time_next < time_next_min)
460 time_next_min = time_next;
463 /* Thread message queues */
465 uint64_t time_next = t->time_next;
467 if (time_next <= time) {
468 thread_msg_handle(t);
469 time_next = time + t->timer_period;
470 t->time_next = time_next;
473 if (time_next < time_next_min)
474 time_next_min = time_next;
477 t->time_next_min = time_next_min;