/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */
#include <stdlib.h>
#include <string.h>

#include <rte_cycles.h>

#include "rte_eth_softnic_internals.h"
14 * Master thread: data plane thread init
17 softnic_thread_free(struct pmd_internals *softnic)
21 RTE_LCORE_FOREACH_SLAVE(i) {
22 struct softnic_thread *t = &softnic->thread[i];
26 rte_ring_free(t->msgq_req);
29 rte_ring_free(t->msgq_rsp);
34 softnic_thread_init(struct pmd_internals *softnic)
38 RTE_LCORE_FOREACH_SLAVE(i) {
39 char ring_name[NAME_MAX];
40 struct rte_ring *msgq_req, *msgq_rsp;
41 struct softnic_thread *t = &softnic->thread[i];
42 struct softnic_thread_data *t_data = &softnic->thread_data[i];
43 uint32_t cpu_id = rte_lcore_to_socket_id(i);
46 snprintf(ring_name, sizeof(ring_name), "%s-TH%u-REQ",
50 msgq_req = rte_ring_create(ring_name,
53 RING_F_SP_ENQ | RING_F_SC_DEQ);
55 if (msgq_req == NULL) {
56 softnic_thread_free(softnic);
60 snprintf(ring_name, sizeof(ring_name), "%s-TH%u-RSP",
64 msgq_rsp = rte_ring_create(ring_name,
67 RING_F_SP_ENQ | RING_F_SC_DEQ);
69 if (msgq_rsp == NULL) {
70 softnic_thread_free(softnic);
74 /* Master thread records */
75 t->msgq_req = msgq_req;
76 t->msgq_rsp = msgq_rsp;
79 /* Data plane thread records */
80 t_data->n_pipelines = 0;
81 t_data->msgq_req = msgq_req;
82 t_data->msgq_rsp = msgq_rsp;
83 t_data->timer_period =
84 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
85 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
86 t_data->time_next_min = t_data->time_next;
93 thread_is_running(uint32_t thread_id)
95 enum rte_lcore_state_t thread_state;
97 thread_state = rte_eal_get_lcore_state(thread_id);
98 return (thread_state == RUNNING)? 1 : 0;
102 * Pipeline is running when:
103 * (A) Pipeline is mapped to a data plane thread AND
104 * (B) Its data plane thread is in RUNNING state.
107 pipeline_is_running(struct pipeline *p)
112 return thread_is_running(p->thread_id);
116 * Master thread & data plane threads: message passing
118 enum thread_req_type {
119 THREAD_REQ_PIPELINE_ENABLE = 0,
120 THREAD_REQ_PIPELINE_DISABLE,
124 struct thread_msg_req {
125 enum thread_req_type type;
129 struct rte_pipeline *p;
131 struct rte_table_action *a;
132 } table[RTE_PIPELINE_TABLE_MAX];
133 struct rte_ring *msgq_req;
134 struct rte_ring *msgq_rsp;
135 uint32_t timer_period_ms;
140 struct rte_pipeline *p;
145 struct thread_msg_rsp {
152 static struct thread_msg_req *
153 thread_msg_alloc(void)
155 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
156 sizeof(struct thread_msg_rsp));
158 return calloc(1, size);
/** Free a message buffer previously obtained through thread_msg_alloc(). */
static void
thread_msg_free(struct thread_msg_rsp *rsp)
{
	free(rsp);
}
167 static struct thread_msg_rsp *
168 thread_msg_send_recv(struct pmd_internals *softnic,
170 struct thread_msg_req *req)
172 struct softnic_thread *t = &softnic->thread[thread_id];
173 struct rte_ring *msgq_req = t->msgq_req;
174 struct rte_ring *msgq_rsp = t->msgq_rsp;
175 struct thread_msg_rsp *rsp;
180 status = rte_ring_sp_enqueue(msgq_req, req);
181 } while (status == -ENOBUFS);
185 status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
186 } while (status != 0);
192 softnic_thread_pipeline_enable(struct pmd_internals *softnic,
194 const char *pipeline_name)
196 struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
197 struct softnic_thread *t;
198 struct thread_msg_req *req;
199 struct thread_msg_rsp *rsp;
203 /* Check input params */
204 if ((thread_id >= RTE_MAX_LCORE) ||
206 (p->n_ports_in == 0) ||
207 (p->n_ports_out == 0) ||
211 t = &softnic->thread[thread_id];
212 if ((t->enabled == 0) ||
216 if (!thread_is_running(thread_id)) {
217 struct softnic_thread_data *td = &softnic->thread_data[thread_id];
218 struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
220 if (td->n_pipelines >= THREAD_PIPELINES_MAX)
223 /* Data plane thread */
224 td->p[td->n_pipelines] = p->p;
227 for (i = 0; i < p->n_tables; i++)
228 tdp->table_data[i].a =
230 tdp->n_tables = p->n_tables;
232 tdp->msgq_req = p->msgq_req;
233 tdp->msgq_rsp = p->msgq_rsp;
234 tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
235 tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
240 p->thread_id = thread_id;
246 /* Allocate request */
247 req = thread_msg_alloc();
252 req->type = THREAD_REQ_PIPELINE_ENABLE;
253 req->pipeline_enable.p = p->p;
254 for (i = 0; i < p->n_tables; i++)
255 req->pipeline_enable.table[i].a =
257 req->pipeline_enable.msgq_req = p->msgq_req;
258 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
259 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
260 req->pipeline_enable.n_tables = p->n_tables;
262 /* Send request and wait for response */
263 rsp = thread_msg_send_recv(softnic, thread_id, req);
268 status = rsp->status;
271 thread_msg_free(rsp);
273 /* Request completion */
277 p->thread_id = thread_id;
284 softnic_thread_pipeline_disable(struct pmd_internals *softnic,
286 const char *pipeline_name)
288 struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
289 struct softnic_thread *t;
290 struct thread_msg_req *req;
291 struct thread_msg_rsp *rsp;
294 /* Check input params */
295 if ((thread_id >= RTE_MAX_LCORE) ||
299 t = &softnic->thread[thread_id];
306 if (p->thread_id != thread_id)
309 if (!thread_is_running(thread_id)) {
310 struct softnic_thread_data *td = &softnic->thread_data[thread_id];
313 for (i = 0; i < td->n_pipelines; i++) {
314 struct pipeline_data *tdp = &td->pipeline_data[i];
319 /* Data plane thread */
320 if (i < td->n_pipelines - 1) {
321 struct rte_pipeline *pipeline_last =
322 td->p[td->n_pipelines - 1];
323 struct pipeline_data *tdp_last =
324 &td->pipeline_data[td->n_pipelines - 1];
326 td->p[i] = pipeline_last;
327 memcpy(tdp, tdp_last, sizeof(*tdp));
341 /* Allocate request */
342 req = thread_msg_alloc();
347 req->type = THREAD_REQ_PIPELINE_DISABLE;
348 req->pipeline_disable.p = p->p;
350 /* Send request and wait for response */
351 rsp = thread_msg_send_recv(softnic, thread_id, req);
356 status = rsp->status;
359 thread_msg_free(rsp);
361 /* Request completion */
371 * Data plane threads: message handling
373 static inline struct thread_msg_req *
374 thread_msg_recv(struct rte_ring *msgq_req)
376 struct thread_msg_req *req;
378 int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
/** Enqueue a response, spinning only while the ring is full. */
static inline void
thread_msg_send(struct rte_ring *msgq_rsp,
	struct thread_msg_rsp *rsp)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}
397 static struct thread_msg_rsp *
398 thread_msg_handle_pipeline_enable(struct softnic_thread_data *t,
399 struct thread_msg_req *req)
401 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
402 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
406 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
411 t->p[t->n_pipelines] = req->pipeline_enable.p;
413 p->p = req->pipeline_enable.p;
414 for (i = 0; i < req->pipeline_enable.n_tables; i++)
416 req->pipeline_enable.table[i].a;
418 p->n_tables = req->pipeline_enable.n_tables;
420 p->msgq_req = req->pipeline_enable.msgq_req;
421 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
423 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
424 p->time_next = rte_get_tsc_cycles() + p->timer_period;
433 static struct thread_msg_rsp *
434 thread_msg_handle_pipeline_disable(struct softnic_thread_data *t,
435 struct thread_msg_req *req)
437 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
438 uint32_t n_pipelines = t->n_pipelines;
439 struct rte_pipeline *pipeline = req->pipeline_disable.p;
443 for (i = 0; i < n_pipelines; i++) {
444 struct pipeline_data *p = &t->pipeline_data[i];
446 if (p->p != pipeline)
449 if (i < n_pipelines - 1) {
450 struct rte_pipeline *pipeline_last =
451 t->p[n_pipelines - 1];
452 struct pipeline_data *p_last =
453 &t->pipeline_data[n_pipelines - 1];
455 t->p[i] = pipeline_last;
456 memcpy(p, p_last, sizeof(*p));
465 /* should not get here */
471 thread_msg_handle(struct softnic_thread_data *t)
474 struct thread_msg_req *req;
475 struct thread_msg_rsp *rsp;
477 req = thread_msg_recv(t->msgq_req);
482 case THREAD_REQ_PIPELINE_ENABLE:
483 rsp = thread_msg_handle_pipeline_enable(t, req);
486 case THREAD_REQ_PIPELINE_DISABLE:
487 rsp = thread_msg_handle_pipeline_disable(t, req);
491 rsp = (struct thread_msg_rsp *)req;
495 thread_msg_send(t->msgq_rsp, rsp);
/**
 * Master thread & data plane threads: message passing
 */
enum pipeline_req_type {
	/* Port IN */
	PIPELINE_REQ_PORT_IN_ENABLE,
	PIPELINE_REQ_PORT_IN_DISABLE,

	PIPELINE_REQ_MAX
};

/* Per-pipeline request; response reuses the same buffer (see
 * pipeline_msg_alloc()).
 */
struct pipeline_msg_req {
	enum pipeline_req_type type;
	uint32_t id; /* Port IN, port OUT or table ID */
};

struct pipeline_msg_rsp {
	int status; /* 0 on success, negative on failure. */
};
522 static struct pipeline_msg_req *
523 pipeline_msg_alloc(void)
525 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
526 sizeof(struct pipeline_msg_rsp));
528 return calloc(1, size);
/** Free a message buffer obtained through pipeline_msg_alloc(). */
static void
pipeline_msg_free(struct pipeline_msg_rsp *rsp)
{
	free(rsp);
}
537 static struct pipeline_msg_rsp *
538 pipeline_msg_send_recv(struct pipeline *p,
539 struct pipeline_msg_req *req)
541 struct rte_ring *msgq_req = p->msgq_req;
542 struct rte_ring *msgq_rsp = p->msgq_rsp;
543 struct pipeline_msg_rsp *rsp;
548 status = rte_ring_sp_enqueue(msgq_req, req);
549 } while (status == -ENOBUFS);
553 status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
554 } while (status != 0);
560 softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
561 const char *pipeline_name,
565 struct pipeline_msg_req *req;
566 struct pipeline_msg_rsp *rsp;
569 /* Check input params */
570 if (pipeline_name == NULL)
573 p = softnic_pipeline_find(softnic, pipeline_name);
575 port_id >= p->n_ports_in)
578 if (!pipeline_is_running(p)) {
579 status = rte_pipeline_port_in_enable(p->p, port_id);
583 /* Allocate request */
584 req = pipeline_msg_alloc();
589 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
592 /* Send request and wait for response */
593 rsp = pipeline_msg_send_recv(p, req);
598 status = rsp->status;
601 pipeline_msg_free(rsp);
607 softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
608 const char *pipeline_name,
612 struct pipeline_msg_req *req;
613 struct pipeline_msg_rsp *rsp;
616 /* Check input params */
617 if (pipeline_name == NULL)
620 p = softnic_pipeline_find(softnic, pipeline_name);
622 port_id >= p->n_ports_in)
625 if (!pipeline_is_running(p)) {
626 status = rte_pipeline_port_in_disable(p->p, port_id);
630 /* Allocate request */
631 req = pipeline_msg_alloc();
636 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
639 /* Send request and wait for response */
640 rsp = pipeline_msg_send_recv(p, req);
645 status = rsp->status;
648 pipeline_msg_free(rsp);
654 * Data plane threads: message handling
656 static inline struct pipeline_msg_req *
657 pipeline_msg_recv(struct rte_ring *msgq_req)
659 struct pipeline_msg_req *req;
661 int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
/** Enqueue a pipeline response, spinning only while the ring is full. */
static inline void
pipeline_msg_send(struct rte_ring *msgq_rsp,
	struct pipeline_msg_rsp *rsp)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}
680 static struct pipeline_msg_rsp *
681 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
682 struct pipeline_msg_req *req)
684 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
685 uint32_t port_id = req->id;
687 rsp->status = rte_pipeline_port_in_enable(p->p,
693 static struct pipeline_msg_rsp *
694 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
695 struct pipeline_msg_req *req)
697 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
698 uint32_t port_id = req->id;
700 rsp->status = rte_pipeline_port_in_disable(p->p,
707 pipeline_msg_handle(struct pipeline_data *p)
710 struct pipeline_msg_req *req;
711 struct pipeline_msg_rsp *rsp;
713 req = pipeline_msg_recv(p->msgq_req);
718 case PIPELINE_REQ_PORT_IN_ENABLE:
719 rsp = pipeline_msg_handle_port_in_enable(p, req);
722 case PIPELINE_REQ_PORT_IN_DISABLE:
723 rsp = pipeline_msg_handle_port_in_disable(p, req);
727 rsp = (struct pipeline_msg_rsp *)req;
731 pipeline_msg_send(p->msgq_rsp, rsp);
736 * Data plane threads: main
739 rte_pmd_softnic_run(uint16_t port_id)
741 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
742 struct pmd_internals *softnic;
743 struct softnic_thread_data *t;
744 uint32_t thread_id, j;
746 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
747 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
750 softnic = dev->data->dev_private;
751 thread_id = rte_lcore_id();
752 t = &softnic->thread_data[thread_id];
756 for (j = 0; j < t->n_pipelines; j++)
757 rte_pipeline_run(t->p[j]);
760 if ((t->iter & 0xFLLU) == 0) {
761 uint64_t time = rte_get_tsc_cycles();
762 uint64_t time_next_min = UINT64_MAX;
764 if (time < t->time_next_min)
767 /* Pipeline message queues */
768 for (j = 0; j < t->n_pipelines; j++) {
769 struct pipeline_data *p =
770 &t->pipeline_data[j];
771 uint64_t time_next = p->time_next;
773 if (time_next <= time) {
774 pipeline_msg_handle(p);
775 rte_pipeline_flush(p->p);
776 time_next = time + p->timer_period;
777 p->time_next = time_next;
780 if (time_next < time_next_min)
781 time_next_min = time_next;
784 /* Thread message queues */
786 uint64_t time_next = t->time_next;
788 if (time_next <= time) {
789 thread_msg_handle(t);
790 time_next = time + t->timer_period;
791 t->time_next = time_next;
794 if (time_next < time_next_min)
795 time_next_min = time_next;
798 t->time_next_min = time_next_min;