1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
22 #ifndef THREAD_PIPELINES_MAX
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
31 #define THREAD_TIMER_PERIOD_MS 100
35 * Master thread: data plane thread context
/* Per-thread message queues as seen from the master thread: requests go
 * out on msgq_req, responses come back on msgq_rsp.
 * NOTE(review): the enclosing struct definition opens outside this view.
 */
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
/* One master-side record per logical core. */
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
/* Per-table run-time data: the table action handle for this table. */
50 struct rte_table_action *a;
/* Per-pipeline run-time data owned by the data plane thread that runs
 * the pipeline.
 */
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
/* Scratch area used to build table entries (see the rule-add handlers
 * below, which cast it to struct rte_pipeline_table_entry).
 */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
/* Data plane thread context: the set of pipelines the thread runs plus
 * its own request/response rings and timer state.
 */
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
/* One data-plane record per logical core. */
78 static struct thread_data thread_data[RTE_MAX_LCORE];
81 * Master thread: data plane thread init
/* Release the message rings of every enabled lcore.
 * NOTE(review): the enclosing function signature is not visible here —
 * presumably a thread_free()-style cleanup helper; confirm in full file.
 */
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
/* For each worker (slave) lcore: create its REQ/RSP message rings and
 * initialize both the master-side and data-plane-side thread records.
 * NOTE(review): the enclosing function signature is not visible here.
 */
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
/* NOTE(review): despite the name, this holds the NUMA socket id of the
 * lcore (used for ring allocation placement).
 */
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
/* Convert the timer period from milliseconds to TSC cycles. */
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
159 * Master thread & data plane threads: message passing
/* Request types the master thread can send to a data plane thread. */
161 enum thread_req_type {
162 THREAD_REQ_PIPELINE_ENABLE = 0,
163 THREAD_REQ_PIPELINE_DISABLE,
/* Thread request message: type tag plus a per-type payload (the union
 * wrapper is not visible in this chunk).
 */
167 struct thread_msg_req {
168 enum thread_req_type type;
/* PIPELINE_ENABLE payload: pipeline handle, its table action handles,
 * its message rings and timer period.
 */
172 struct rte_pipeline *p;
174 struct rte_table_action *a;
175 } table[RTE_PIPELINE_TABLE_MAX];
176 struct rte_ring *msgq_req;
177 struct rte_ring *msgq_rsp;
178 uint32_t timer_period_ms;
/* PIPELINE_DISABLE payload: the pipeline to detach. */
183 struct rte_pipeline *p;
/* Thread response message (status field not visible in this chunk). */
188 struct thread_msg_rsp {
/* Allocate a zeroed buffer big enough to hold either a request or a
 * response, so the same buffer can be reused for the reply in place.
 */
195 static struct thread_msg_req *
196 thread_msg_alloc(void)
198 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
199 sizeof(struct thread_msg_rsp));
201 return calloc(1, size);
/* Free a message buffer obtained from thread_msg_alloc(). */
205 thread_msg_free(struct thread_msg_rsp *rsp)
/* Master thread: send a request to the given data plane thread and busy
 * wait for its response. Both loops spin — enqueue retries while the
 * ring is full, dequeue retries until a response shows up.
 */
210 static struct thread_msg_rsp *
211 thread_msg_send_recv(uint32_t thread_id,
212 struct thread_msg_req *req)
214 struct thread *t = &thread[thread_id];
215 struct rte_ring *msgq_req = t->msgq_req;
216 struct rte_ring *msgq_rsp = t->msgq_rsp;
217 struct thread_msg_rsp *rsp;
222 status = rte_ring_sp_enqueue(msgq_req, req);
223 } while (status == -ENOBUFS);
227 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
228 } while (status != 0);
/* Master thread API: attach a pipeline to a data plane thread. Validates
 * inputs, builds a PIPELINE_ENABLE request with the pipeline's handles,
 * sends it to the thread and, on success, records the owning thread id
 * in the pipeline object.
 */
234 thread_pipeline_enable(uint32_t thread_id,
235 const char *pipeline_name)
237 struct pipeline *p = pipeline_find(pipeline_name);
239 struct thread_msg_req *req;
240 struct thread_msg_rsp *rsp;
244 /* Check input params */
245 if ((thread_id >= RTE_MAX_LCORE) ||
247 (p->n_ports_in == 0) ||
248 (p->n_ports_out == 0) ||
252 t = &thread[thread_id];
253 if ((t->enabled == 0) ||
257 /* Allocate request */
258 req = thread_msg_alloc();
263 req->type = THREAD_REQ_PIPELINE_ENABLE;
264 req->pipeline_enable.p = p->p;
265 for (i = 0; i < p->n_tables; i++)
266 req->pipeline_enable.table[i].a =
268 req->pipeline_enable.msgq_req = p->msgq_req;
269 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
270 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
271 req->pipeline_enable.n_tables = p->n_tables;
273 /* Send request and wait for response */
274 rsp = thread_msg_send_recv(thread_id, req);
279 status = rsp->status;
282 thread_msg_free(rsp);
284 /* Request completion */
288 p->thread_id = thread_id;
/* Master thread API: detach a pipeline from the data plane thread that
 * currently runs it. Mirrors thread_pipeline_enable(): validate, build a
 * PIPELINE_DISABLE request, send, read status, free the message.
 */
295 thread_pipeline_disable(uint32_t thread_id,
296 const char *pipeline_name)
298 struct pipeline *p = pipeline_find(pipeline_name);
300 struct thread_msg_req *req;
301 struct thread_msg_rsp *rsp;
304 /* Check input params */
305 if ((thread_id >= RTE_MAX_LCORE) ||
309 t = &thread[thread_id];
/* Only the thread that owns the pipeline may disable it. */
316 if (p->thread_id != thread_id)
319 /* Allocate request */
320 req = thread_msg_alloc();
325 req->type = THREAD_REQ_PIPELINE_DISABLE;
326 req->pipeline_disable.p = p->p;
328 /* Send request and wait for response */
329 rsp = thread_msg_send_recv(thread_id, req);
334 status = rsp->status;
337 thread_msg_free(rsp);
339 /* Request completion */
349 * Data plane threads: message handling
/* Data plane thread: non-blocking poll of the request ring; the missing
 * lines presumably return NULL when the ring is empty.
 */
351 static inline struct thread_msg_req *
352 thread_msg_recv(struct rte_ring *msgq_req)
354 struct thread_msg_req *req;
356 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/* Data plane thread: post a response, spinning while the ring is full. */
365 thread_msg_send(struct rte_ring *msgq_rsp,
366 struct thread_msg_rsp *rsp)
371 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
372 } while (status == -ENOBUFS);
/* Data plane side of PIPELINE_ENABLE: copy the pipeline's handles from
 * the request into the thread's next free pipeline_data slot. The
 * response reuses the request buffer in place (same allocation).
 */
375 static struct thread_msg_rsp *
376 thread_msg_handle_pipeline_enable(struct thread_data *t,
377 struct thread_msg_req *req)
379 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
380 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
/* Reject when the thread already runs the maximum pipeline count. */
384 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
389 t->p[t->n_pipelines] = req->pipeline_enable.p;
391 p->p = req->pipeline_enable.p;
392 for (i = 0; i < req->pipeline_enable.n_tables; i++)
394 req->pipeline_enable.table[i].a;
396 p->n_tables = req->pipeline_enable.n_tables;
398 p->msgq_req = req->pipeline_enable.msgq_req;
399 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
/* Convert the timer period from milliseconds to TSC cycles. */
401 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
402 p->time_next = rte_get_tsc_cycles() + p->timer_period;
/* Data plane side of PIPELINE_DISABLE: find the pipeline in this
 * thread's array and remove it by swapping the last element into its
 * slot (order is not preserved). Response reuses the request buffer.
 */
411 static struct thread_msg_rsp *
412 thread_msg_handle_pipeline_disable(struct thread_data *t,
413 struct thread_msg_req *req)
415 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
416 uint32_t n_pipelines = t->n_pipelines;
417 struct rte_pipeline *pipeline = req->pipeline_disable.p;
421 for (i = 0; i < n_pipelines; i++) {
422 struct pipeline_data *p = &t->pipeline_data[i];
424 if (p->p != pipeline)
/* Not the last element: move the last pipeline into slot i. */
427 if (i < n_pipelines - 1) {
428 struct rte_pipeline *pipeline_last =
429 t->p[n_pipelines - 1];
430 struct pipeline_data *p_last =
431 &t->pipeline_data[n_pipelines - 1];
433 t->p[i] = pipeline_last;
434 memcpy(p, p_last, sizeof(*p));
443 /* should not get here */
/* Data plane thread: poll for one request, dispatch it to the matching
 * handler, and send the response back on the thread's response ring.
 */
449 thread_msg_handle(struct thread_data *t)
452 struct thread_msg_req *req;
453 struct thread_msg_rsp *rsp;
455 req = thread_msg_recv(t->msgq_req);
460 case THREAD_REQ_PIPELINE_ENABLE:
461 rsp = thread_msg_handle_pipeline_enable(t, req);
464 case THREAD_REQ_PIPELINE_DISABLE:
465 rsp = thread_msg_handle_pipeline_disable(t, req);
/* Unknown request type: echo the request buffer back as response. */
469 rsp = (struct thread_msg_rsp *) req;
473 thread_msg_send(t->msgq_rsp, rsp);
478 * Master thread & data plane threads: message passing
/* Request types the master thread can send to a running pipeline. */
480 enum pipeline_req_type {
482 PIPELINE_REQ_PORT_IN_STATS_READ,
483 PIPELINE_REQ_PORT_IN_ENABLE,
484 PIPELINE_REQ_PORT_IN_DISABLE,
487 PIPELINE_REQ_PORT_OUT_STATS_READ,
490 PIPELINE_REQ_TABLE_STATS_READ,
491 PIPELINE_REQ_TABLE_RULE_ADD,
492 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
493 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
494 PIPELINE_REQ_TABLE_RULE_DELETE,
495 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
496 PIPELINE_REQ_TABLE_RULE_STATS_READ,
/* Per-request payloads. */
500 struct pipeline_msg_req_port_in_stats_read {
504 struct pipeline_msg_req_port_out_stats_read {
508 struct pipeline_msg_req_table_stats_read {
512 struct pipeline_msg_req_table_rule_add {
513 struct table_rule_match match;
514 struct table_rule_action action;
517 struct pipeline_msg_req_table_rule_add_default {
518 struct table_rule_action action;
/* Bulk add passes pointers to caller-owned arrays, not copies; the
 * arrays must stay valid until the response arrives.
 */
521 struct pipeline_msg_req_table_rule_add_bulk {
522 struct table_rule_match *match;
523 struct table_rule_action *action;
529 struct pipeline_msg_req_table_rule_delete {
530 struct table_rule_match match;
533 struct pipeline_msg_req_table_rule_stats_read {
/* Pipeline request message: type tag, object id, per-type payload. */
538 struct pipeline_msg_req {
539 enum pipeline_req_type type;
540 uint32_t id; /* Port IN, port OUT or table ID */
544 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
545 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
546 struct pipeline_msg_req_table_stats_read table_stats_read;
547 struct pipeline_msg_req_table_rule_add table_rule_add;
548 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
549 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
550 struct pipeline_msg_req_table_rule_delete table_rule_delete;
551 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
/* Per-response payloads. */
555 struct pipeline_msg_rsp_port_in_stats_read {
556 struct rte_pipeline_port_in_stats stats;
559 struct pipeline_msg_rsp_port_out_stats_read {
560 struct rte_pipeline_port_out_stats stats;
563 struct pipeline_msg_rsp_table_stats_read {
564 struct rte_pipeline_table_stats stats;
567 struct pipeline_msg_rsp_table_rule_add {
571 struct pipeline_msg_rsp_table_rule_add_default {
575 struct pipeline_msg_rsp_table_rule_add_bulk {
579 struct pipeline_msg_rsp_table_rule_stats_read {
580 struct rte_table_action_stats_counters stats;
/* Pipeline response message: status plus per-type payload. */
583 struct pipeline_msg_rsp {
588 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
589 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
590 struct pipeline_msg_rsp_table_stats_read table_stats_read;
591 struct pipeline_msg_rsp_table_rule_add table_rule_add;
592 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
593 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
594 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
/* Allocate a zeroed buffer large enough for either a pipeline request
 * or response, so the response can be written into the same buffer.
 */
601 static struct pipeline_msg_req *
602 pipeline_msg_alloc(void)
604 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
605 sizeof(struct pipeline_msg_rsp));
607 return calloc(1, size);
/* Free a message buffer obtained from pipeline_msg_alloc(). */
611 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
/* Master thread: send a request to a pipeline's data plane thread over
 * the pipeline's rings and busy wait for the response.
 */
616 static struct pipeline_msg_rsp *
617 pipeline_msg_send_recv(struct pipeline *p,
618 struct pipeline_msg_req *req)
620 struct rte_ring *msgq_req = p->msgq_req;
621 struct rte_ring *msgq_rsp = p->msgq_rsp;
622 struct pipeline_msg_rsp *rsp;
627 status = rte_ring_sp_enqueue(msgq_req, req);
628 } while (status == -ENOBUFS);
632 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
633 } while (status != 0);
/* Master thread API: read (and optionally clear) input port statistics
 * of a running pipeline via a PORT_IN_STATS_READ message.
 */
639 pipeline_port_in_stats_read(const char *pipeline_name,
641 struct rte_pipeline_port_in_stats *stats,
645 struct pipeline_msg_req *req;
646 struct pipeline_msg_rsp *rsp;
649 /* Check input params */
650 if ((pipeline_name == NULL) ||
654 p = pipeline_find(pipeline_name);
657 (port_id >= p->n_ports_in))
660 /* Allocate request */
661 req = pipeline_msg_alloc();
666 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
668 req->port_in_stats_read.clear = clear;
670 /* Send request and wait for response */
671 rsp = pipeline_msg_send_recv(p, req);
676 status = rsp->status;
678 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
681 pipeline_msg_free(rsp);
/* Master thread API: enable a pipeline input port via a PORT_IN_ENABLE
 * message to the owning data plane thread.
 */
687 pipeline_port_in_enable(const char *pipeline_name,
691 struct pipeline_msg_req *req;
692 struct pipeline_msg_rsp *rsp;
695 /* Check input params */
696 if (pipeline_name == NULL)
699 p = pipeline_find(pipeline_name);
702 (port_id >= p->n_ports_in))
705 /* Allocate request */
706 req = pipeline_msg_alloc();
711 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
714 /* Send request and wait for response */
715 rsp = pipeline_msg_send_recv(p, req);
720 status = rsp->status;
723 pipeline_msg_free(rsp);
/* Master thread API: disable a pipeline input port via a
 * PORT_IN_DISABLE message. Mirrors pipeline_port_in_enable().
 */
729 pipeline_port_in_disable(const char *pipeline_name,
733 struct pipeline_msg_req *req;
734 struct pipeline_msg_rsp *rsp;
737 /* Check input params */
738 if (pipeline_name == NULL)
741 p = pipeline_find(pipeline_name);
744 (port_id >= p->n_ports_in))
747 /* Allocate request */
748 req = pipeline_msg_alloc();
753 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
756 /* Send request and wait for response */
757 rsp = pipeline_msg_send_recv(p, req);
762 status = rsp->status;
765 pipeline_msg_free(rsp);
/* Master thread API: read (and optionally clear) output port statistics
 * of a running pipeline via a PORT_OUT_STATS_READ message.
 */
771 pipeline_port_out_stats_read(const char *pipeline_name,
773 struct rte_pipeline_port_out_stats *stats,
777 struct pipeline_msg_req *req;
778 struct pipeline_msg_rsp *rsp;
781 /* Check input params */
782 if ((pipeline_name == NULL) ||
786 p = pipeline_find(pipeline_name);
789 (port_id >= p->n_ports_out))
792 /* Allocate request */
793 req = pipeline_msg_alloc();
798 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
800 req->port_out_stats_read.clear = clear;
802 /* Send request and wait for response */
803 rsp = pipeline_msg_send_recv(p, req);
808 status = rsp->status;
810 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
813 pipeline_msg_free(rsp);
/* Master thread API: read (and optionally clear) statistics of one
 * pipeline table via a TABLE_STATS_READ message.
 */
819 pipeline_table_stats_read(const char *pipeline_name,
821 struct rte_pipeline_table_stats *stats,
825 struct pipeline_msg_req *req;
826 struct pipeline_msg_rsp *rsp;
829 /* Check input params */
830 if ((pipeline_name == NULL) ||
834 p = pipeline_find(pipeline_name);
837 (table_id >= p->n_tables))
840 /* Allocate request */
841 req = pipeline_msg_alloc();
846 req->type = PIPELINE_REQ_TABLE_STATS_READ;
848 req->table_stats_read.clear = clear;
850 /* Send request and wait for response */
851 rsp = pipeline_msg_send_recv(p, req);
856 status = rsp->status;
858 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
861 pipeline_msg_free(rsp);
/* Validate a high-level table rule match against the target table's
 * configuration: match type must agree with the table, and per-type
 * fields (ACL IP version/depths, LPM key size) must be consistent.
 */
867 match_check(struct table_rule_match *match,
873 if ((match == NULL) ||
875 (table_id >= p->n_tables))
878 table = &p->table[table_id];
879 if (match->match_type != table->params.match_type)
882 switch (match->match_type) {
885 struct table_acl_params *t = &table->params.match.acl;
886 struct table_rule_match_acl *r = &match->match.acl;
/* Rule and table must agree on IPv4 vs IPv6. */
888 if ((r->ip_version && (t->ip_version == 0)) ||
889 ((r->ip_version == 0) && t->ip_version))
/* IPv4 prefixes: depth at most 32 bits. */
893 if ((r->sa_depth > 32) ||
/* IPv6 prefixes: depth at most 128 bits. */
897 if ((r->sa_depth > 128) ||
912 struct table_lpm_params *t = &table->params.match.lpm;
913 struct table_rule_match_lpm *r = &match->match.lpm;
/* LPM key size 4 bytes for IPv4, 16 bytes for IPv6. */
915 if ((r->ip_version && (t->key_size != 4)) ||
916 ((r->ip_version == 0) && (t->key_size != 16)))
/* Validate a high-level table rule action against the table's action
 * profile: the action mask must match the profile exactly, and each
 * enabled action's parameters must be within the profile's limits.
 */
938 action_check(struct table_rule_action *action,
942 struct table_action_profile *ap;
944 if ((action == NULL) ||
946 (table_id >= p->n_tables))
949 ap = p->table[table_id].ap;
950 if (action->action_mask != ap->params.action_mask)
/* FWD: destination port/table id must exist in this pipeline. */
953 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
954 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
955 (action->fwd.id >= p->n_ports_out))
958 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
959 (action->fwd.id >= p->n_tables))
/* MTR: rule's traffic class mask must equal the profile's. */
963 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
964 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
965 uint32_t tc_mask1 = action->mtr.tc_mask;
967 if (tc_mask1 != tc_mask0)
/* TM: subport/pipe ids must be within the hierarchy bounds. */
971 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
972 uint32_t n_subports_per_port =
973 ap->params.tm.n_subports_per_port;
974 uint32_t n_pipes_per_subport =
975 ap->params.tm.n_pipes_per_subport;
976 uint32_t subport_id = action->tm.subport_id;
977 uint32_t pipe_id = action->tm.pipe_id;
979 if ((subport_id >= n_subports_per_port) ||
980 (pipe_id >= n_pipes_per_subport))
/* ENCAP: requested encapsulation type must be enabled in profile. */
984 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
985 uint64_t encap_mask = ap->params.encap.encap_mask;
986 enum rte_table_action_encap_type type = action->encap.type;
988 if ((encap_mask & (1LLU << type)) == 0)
/* NAT: rule and profile must agree on IPv4 vs IPv6. */
992 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
993 int ip_version0 = ap->params.common.ip_version;
994 int ip_version1 = action->nat.ip_version;
996 if ((ip_version1 && (ip_version0 == 0)) ||
997 ((ip_version1 == 0) && ip_version0))
/* Validate the action of a table default rule: only FWD is allowed, and
 * the forward destination (port or table) must exist in the pipeline.
 */
1005 action_default_check(struct table_rule_action *action,
1009 if ((action == NULL) ||
1010 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1012 (table_id >= p->n_tables))
1015 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1016 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1017 (action->fwd.id >= p->n_ports_out))
1020 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1021 (action->fwd.id >= p->n_tables))
/* Master thread API: add one rule (match + action) to a pipeline table.
 * Validates the rule, copies it into a TABLE_RULE_ADD request, and on
 * success returns the table entry handle via *data.
 */
1029 pipeline_table_rule_add(const char *pipeline_name,
1031 struct table_rule_match *match,
1032 struct table_rule_action *action,
1036 struct pipeline_msg_req *req;
1037 struct pipeline_msg_rsp *rsp;
1040 /* Check input params */
1041 if ((pipeline_name == NULL) ||
1047 p = pipeline_find(pipeline_name);
1049 (p->enabled == 0) ||
1050 (table_id >= p->n_tables) ||
1051 match_check(match, p, table_id) ||
1052 action_check(action, p, table_id))
1055 /* Allocate request */
1056 req = pipeline_msg_alloc();
1061 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1063 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1064 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1066 /* Send request and wait for response */
1067 rsp = pipeline_msg_send_recv(p, req);
1072 status = rsp->status;
1074 *data = rsp->table_rule_add.data;
1077 pipeline_msg_free(rsp);
/* Master thread API: set the default (miss) rule of a pipeline table.
 * Only a FWD action is valid (see action_default_check()).
 */
1083 pipeline_table_rule_add_default(const char *pipeline_name,
1085 struct table_rule_action *action,
1089 struct pipeline_msg_req *req;
1090 struct pipeline_msg_rsp *rsp;
1093 /* Check input params */
1094 if ((pipeline_name == NULL) ||
1099 p = pipeline_find(pipeline_name);
1101 (p->enabled == 0) ||
1102 (table_id >= p->n_tables) ||
1103 action_default_check(action, p, table_id))
1106 /* Allocate request */
1107 req = pipeline_msg_alloc();
1112 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1114 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1116 /* Send request and wait for response */
1117 rsp = pipeline_msg_send_recv(p, req);
1122 status = rsp->status;
1124 *data = rsp->table_rule_add_default.data;
1127 pipeline_msg_free(rsp);
/* Master thread API: add many rules to a table in one message. The
 * match/action/data arrays are passed by pointer (caller-owned) and
 * *n_rules is updated with the number of rules actually added.
 */
1133 pipeline_table_rule_add_bulk(const char *pipeline_name,
1135 struct table_rule_match *match,
1136 struct table_rule_action *action,
1141 struct pipeline_msg_req *req;
1142 struct pipeline_msg_rsp *rsp;
1146 /* Check input params */
1147 if ((pipeline_name == NULL) ||
1151 (n_rules == NULL) ||
1155 p = pipeline_find(pipeline_name);
1157 (p->enabled == 0) ||
1158 (table_id >= p->n_tables))
/* NOTE(review): the loop index i is not used to index match/action
 * here — every iteration checks the same element; verify against the
 * full file whether this should be &match[i]/&action[i].
 */
1161 for (i = 0; i < *n_rules; i++)
1162 if (match_check(match, p, table_id) ||
1163 action_check(action, p, table_id))
1166 /* Allocate request */
1167 req = pipeline_msg_alloc();
1172 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
1174 req->table_rule_add_bulk.match = match;
1175 req->table_rule_add_bulk.action = action;
1176 req->table_rule_add_bulk.data = data;
1177 req->table_rule_add_bulk.n_rules = *n_rules;
/* ACL tables get the true bulk path in the data plane handler. */
1178 req->table_rule_add_bulk.bulk =
1179 (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
1181 /* Send request and wait for response */
1182 rsp = pipeline_msg_send_recv(p, req);
1187 status = rsp->status;
1189 *n_rules = rsp->table_rule_add_bulk.n_rules;
1192 pipeline_msg_free(rsp);
/* Master thread API: delete the rule matching the given key from a
 * pipeline table via a TABLE_RULE_DELETE message.
 */
1198 pipeline_table_rule_delete(const char *pipeline_name,
1200 struct table_rule_match *match)
1203 struct pipeline_msg_req *req;
1204 struct pipeline_msg_rsp *rsp;
1207 /* Check input params */
1208 if ((pipeline_name == NULL) ||
1212 p = pipeline_find(pipeline_name);
1214 (p->enabled == 0) ||
1215 (table_id >= p->n_tables) ||
1216 match_check(match, p, table_id))
1219 /* Allocate request */
1220 req = pipeline_msg_alloc();
1225 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1227 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1229 /* Send request and wait for response */
1230 rsp = pipeline_msg_send_recv(p, req);
1235 status = rsp->status;
1238 pipeline_msg_free(rsp);
/* Master thread API: delete the default (miss) rule of a pipeline table
 * via a TABLE_RULE_DELETE_DEFAULT message.
 */
1244 pipeline_table_rule_delete_default(const char *pipeline_name,
1248 struct pipeline_msg_req *req;
1249 struct pipeline_msg_rsp *rsp;
1252 /* Check input params */
1253 if (pipeline_name == NULL)
1256 p = pipeline_find(pipeline_name);
1258 (p->enabled == 0) ||
1259 (table_id >= p->n_tables))
1262 /* Allocate request */
1263 req = pipeline_msg_alloc();
1268 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1271 /* Send request and wait for response */
1272 rsp = pipeline_msg_send_recv(p, req);
1277 status = rsp->status;
1280 pipeline_msg_free(rsp);
/* Master thread API: read (and optionally clear) the action stats of
 * one table rule, identified by its entry handle (data).
 */
1286 pipeline_table_rule_stats_read(const char *pipeline_name,
1289 struct rte_table_action_stats_counters *stats,
1293 struct pipeline_msg_req *req;
1294 struct pipeline_msg_rsp *rsp;
1297 /* Check input params */
1298 if ((pipeline_name == NULL) ||
1303 p = pipeline_find(pipeline_name);
1305 (p->enabled == 0) ||
1306 (table_id >= p->n_tables))
1309 /* Allocate request */
1310 req = pipeline_msg_alloc();
1315 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
1317 req->table_rule_stats_read.data = data;
1318 req->table_rule_stats_read.clear = clear;
1320 /* Send request and wait for response */
1321 rsp = pipeline_msg_send_recv(p, req);
1326 status = rsp->status;
1328 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1331 pipeline_msg_free(rsp);
1337 * Data plane threads: message handling
/* Data plane thread: non-blocking poll of a pipeline's request ring;
 * the missing lines presumably return NULL when the ring is empty.
 */
1339 static inline struct pipeline_msg_req *
1340 pipeline_msg_recv(struct rte_ring *msgq_req)
1342 struct pipeline_msg_req *req;
1344 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/* Data plane thread: post a pipeline response, spinning on a full ring. */
1353 pipeline_msg_send(struct rte_ring *msgq_rsp,
1354 struct pipeline_msg_rsp *rsp)
1359 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
1360 } while (status == -ENOBUFS);
/* Data plane side of PORT_IN_STATS_READ: query the pipeline library and
 * write the stats straight into the response (reused request buffer).
 */
1363 static struct pipeline_msg_rsp *
1364 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
1365 struct pipeline_msg_req *req)
1367 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1368 uint32_t port_id = req->id;
1369 int clear = req->port_in_stats_read.clear;
1371 rsp->status = rte_pipeline_port_in_stats_read(p->p,
1373 &rsp->port_in_stats_read.stats,
/* Data plane side of PORT_IN_ENABLE: enable the input port in place. */
1379 static struct pipeline_msg_rsp *
1380 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
1381 struct pipeline_msg_req *req)
1383 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1384 uint32_t port_id = req->id;
1386 rsp->status = rte_pipeline_port_in_enable(p->p,
/* Data plane side of PORT_IN_DISABLE: disable the input port in place. */
1392 static struct pipeline_msg_rsp *
1393 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
1394 struct pipeline_msg_req *req)
1396 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1397 uint32_t port_id = req->id;
1399 rsp->status = rte_pipeline_port_in_disable(p->p,
/* Data plane side of PORT_OUT_STATS_READ: query the pipeline library
 * and write the stats into the response (reused request buffer).
 */
1405 static struct pipeline_msg_rsp *
1406 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
1407 struct pipeline_msg_req *req)
1409 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1410 uint32_t port_id = req->id;
1411 int clear = req->port_out_stats_read.clear;
1413 rsp->status = rte_pipeline_port_out_stats_read(p->p,
1415 &rsp->port_out_stats_read.stats,
/* Data plane side of TABLE_STATS_READ: query the pipeline library and
 * write the stats into the response (reused request buffer).
 * NOTE(review): the local is named port_id but actually carries the
 * table id (req->id) — misleading name, behavior is correct.
 */
1421 static struct pipeline_msg_rsp *
1422 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
1423 struct pipeline_msg_req *req)
1425 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1426 uint32_t port_id = req->id;
1427 int clear = req->table_stats_read.clear;
1429 rsp->status = rte_pipeline_table_stats_read(p->p,
1431 &rsp->table_stats_read.stats,
/* Low-level (librte_table) representation of a rule match key; one
 * member per supported table type, filled by match_convert().
 */
1437 union table_rule_match_low_level {
1438 struct rte_table_acl_rule_add_params acl_add;
1439 struct rte_table_acl_rule_delete_params acl_delete;
1440 struct rte_table_array_key array;
1441 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1442 struct rte_table_lpm_key lpm_ipv4;
1443 struct rte_table_lpm_ipv6_key lpm_ipv6;
/* Split a 0..128 IPv6 prefix depth into four per-32-bit-word depths
 * (depth32[0..3]) as expected by the ACL field ranges; the omitted
 * branches presumably fill the remaining words with 0 or 32.
 */
1447 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
1452 switch (depth / 32) {
1462 depth32[1] = depth - 32;
1470 depth32[2] = depth - 64;
1478 depth32[3] = depth - 96;
/* Convert a high-level rule match (mh) into the low-level key (ml) used
 * by the underlying librte_table implementation. For ACL tables, the
 * layout differs for add vs delete (acl_add vs acl_delete) and for IPv4
 * (5 fields: proto, sa, da, sp, dp) vs IPv6 (11 fields: proto, 4x sa
 * words, 4x da words, sp, dp). The third parameter presumably selects
 * add (non-zero) vs delete — confirm in full file.
 */
1494 match_convert(struct table_rule_match *mh,
1495 union table_rule_match_low_level *ml,
1498 memset(ml, 0, sizeof(*ml));
1500 switch (mh->match_type) {
/* ACL, IPv4, add: proto + source/dest address and port ranges. */
1502 if (mh->match.acl.ip_version)
1504 ml->acl_add.field_value[0].value.u8 =
1505 mh->match.acl.proto;
1506 ml->acl_add.field_value[0].mask_range.u8 =
1507 mh->match.acl.proto_mask;
1509 ml->acl_add.field_value[1].value.u32 =
1510 mh->match.acl.ipv4.sa;
1511 ml->acl_add.field_value[1].mask_range.u32 =
1512 mh->match.acl.sa_depth;
1514 ml->acl_add.field_value[2].value.u32 =
1515 mh->match.acl.ipv4.da;
1516 ml->acl_add.field_value[2].mask_range.u32 =
1517 mh->match.acl.da_depth;
1519 ml->acl_add.field_value[3].value.u16 =
1521 ml->acl_add.field_value[3].mask_range.u16 =
1524 ml->acl_add.field_value[4].value.u16 =
1526 ml->acl_add.field_value[4].mask_range.u16 =
1529 ml->acl_add.priority =
1530 (int32_t) mh->match.acl.priority;
/* ACL, IPv4, delete: same field layout as add, no priority. */
1532 ml->acl_delete.field_value[0].value.u8 =
1533 mh->match.acl.proto;
1534 ml->acl_delete.field_value[0].mask_range.u8 =
1535 mh->match.acl.proto_mask;
1537 ml->acl_delete.field_value[1].value.u32 =
1538 mh->match.acl.ipv4.sa;
1539 ml->acl_delete.field_value[1].mask_range.u32 =
1540 mh->match.acl.sa_depth;
1542 ml->acl_delete.field_value[2].value.u32 =
1543 mh->match.acl.ipv4.da;
1544 ml->acl_delete.field_value[2].mask_range.u32 =
1545 mh->match.acl.da_depth;
1547 ml->acl_delete.field_value[3].value.u16 =
1549 ml->acl_delete.field_value[3].mask_range.u16 =
1552 ml->acl_delete.field_value[4].value.u16 =
1554 ml->acl_delete.field_value[4].mask_range.u16 =
/* ACL, IPv6, add: view the 128-bit addresses as 4x u32 words and
 * split the prefix depths per word.
 * NOTE(review): casting the byte arrays to uint32_t* relies on
 * suitable alignment and byte order handled elsewhere.
 */
1560 (uint32_t *) mh->match.acl.ipv6.sa;
1562 (uint32_t *) mh->match.acl.ipv6.da;
1563 uint32_t sa32_depth[4], da32_depth[4];
1566 status = match_convert_ipv6_depth(
1567 mh->match.acl.sa_depth,
1572 status = match_convert_ipv6_depth(
1573 mh->match.acl.da_depth,
1578 ml->acl_add.field_value[0].value.u8 =
1579 mh->match.acl.proto;
1580 ml->acl_add.field_value[0].mask_range.u8 =
1581 mh->match.acl.proto_mask;
1583 ml->acl_add.field_value[1].value.u32 = sa32[0];
1584 ml->acl_add.field_value[1].mask_range.u32 =
1586 ml->acl_add.field_value[2].value.u32 = sa32[1];
1587 ml->acl_add.field_value[2].mask_range.u32 =
1589 ml->acl_add.field_value[3].value.u32 = sa32[2];
1590 ml->acl_add.field_value[3].mask_range.u32 =
1592 ml->acl_add.field_value[4].value.u32 = sa32[3];
1593 ml->acl_add.field_value[4].mask_range.u32 =
1596 ml->acl_add.field_value[5].value.u32 = da32[0];
1597 ml->acl_add.field_value[5].mask_range.u32 =
1599 ml->acl_add.field_value[6].value.u32 = da32[1];
1600 ml->acl_add.field_value[6].mask_range.u32 =
1602 ml->acl_add.field_value[7].value.u32 = da32[2];
1603 ml->acl_add.field_value[7].mask_range.u32 =
1605 ml->acl_add.field_value[8].value.u32 = da32[3];
1606 ml->acl_add.field_value[8].mask_range.u32 =
1609 ml->acl_add.field_value[9].value.u16 =
1611 ml->acl_add.field_value[9].mask_range.u16 =
1614 ml->acl_add.field_value[10].value.u16 =
1616 ml->acl_add.field_value[10].mask_range.u16 =
1619 ml->acl_add.priority =
1620 (int32_t) mh->match.acl.priority;
/* ACL, IPv6, delete: same field layout as add, no priority. */
1623 (uint32_t *) mh->match.acl.ipv6.sa;
1625 (uint32_t *) mh->match.acl.ipv6.da;
1626 uint32_t sa32_depth[4], da32_depth[4];
1629 status = match_convert_ipv6_depth(
1630 mh->match.acl.sa_depth,
1635 status = match_convert_ipv6_depth(
1636 mh->match.acl.da_depth,
1641 ml->acl_delete.field_value[0].value.u8 =
1642 mh->match.acl.proto;
1643 ml->acl_delete.field_value[0].mask_range.u8 =
1644 mh->match.acl.proto_mask;
1646 ml->acl_delete.field_value[1].value.u32 =
1648 ml->acl_delete.field_value[1].mask_range.u32 =
1650 ml->acl_delete.field_value[2].value.u32 =
1652 ml->acl_delete.field_value[2].mask_range.u32 =
1654 ml->acl_delete.field_value[3].value.u32 =
1656 ml->acl_delete.field_value[3].mask_range.u32 =
1658 ml->acl_delete.field_value[4].value.u32 =
1660 ml->acl_delete.field_value[4].mask_range.u32 =
1663 ml->acl_delete.field_value[5].value.u32 =
1665 ml->acl_delete.field_value[5].mask_range.u32 =
1667 ml->acl_delete.field_value[6].value.u32 =
1669 ml->acl_delete.field_value[6].mask_range.u32 =
1671 ml->acl_delete.field_value[7].value.u32 =
1673 ml->acl_delete.field_value[7].mask_range.u32 =
1675 ml->acl_delete.field_value[8].value.u32 =
1677 ml->acl_delete.field_value[8].mask_range.u32 =
1680 ml->acl_delete.field_value[9].value.u16 =
1682 ml->acl_delete.field_value[9].mask_range.u16 =
1685 ml->acl_delete.field_value[10].value.u16 =
1687 ml->acl_delete.field_value[10].mask_range.u16 =
/* ARRAY: key is simply the array position. */
1693 ml->array.pos = mh->match.array.pos;
/* HASH: raw key bytes are copied verbatim. */
1697 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
/* LPM: IPv4 or IPv6 key plus prefix depth. */
1701 if (mh->match.lpm.ip_version) {
1702 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
1703 ml->lpm_ipv4.depth = mh->match.lpm.depth;
1705 memcpy(ml->lpm_ipv6.ip,
1706 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
1707 ml->lpm_ipv6.depth = mh->match.lpm.depth;
/* Data plane side of TABLE_RULE_ADD: build the table entry in the
 * per-pipeline scratch buffer by applying each enabled action in turn
 * (FWD, MTR, TM, ENCAP, NAT, TTL, STATS, TIME), convert the match key
 * to its low-level form, insert it into the pipeline table, and return
 * the resulting entry handle in the response.
 */
1717 static struct pipeline_msg_rsp *
1718 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
1719 struct pipeline_msg_req *req)
1721 union table_rule_match_low_level match_ll;
1722 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1723 struct table_rule_match *match = &req->table_rule_add.match;
1724 struct table_rule_action *action = &req->table_rule_add.action;
1725 struct rte_pipeline_table_entry *data_in, *data_out;
1726 uint32_t table_id = req->id;
1727 int key_found, status;
1728 struct rte_table_action *a = p->table_data[table_id].a;
/* The scratch buffer doubles as the rte_pipeline_table_entry. */
1731 memset(p->buffer, 0, sizeof(p->buffer));
1732 data_in = (struct rte_pipeline_table_entry *) p->buffer;
1734 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1735 status = rte_table_action_apply(a,
1737 RTE_TABLE_ACTION_FWD,
1746 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1747 status = rte_table_action_apply(a,
1749 RTE_TABLE_ACTION_MTR,
1758 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1759 status = rte_table_action_apply(a,
1761 RTE_TABLE_ACTION_TM,
1770 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1771 status = rte_table_action_apply(a,
1773 RTE_TABLE_ACTION_ENCAP,
1782 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1783 status = rte_table_action_apply(a,
1785 RTE_TABLE_ACTION_NAT,
1794 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
1795 status = rte_table_action_apply(a,
1797 RTE_TABLE_ACTION_TTL,
1806 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
1807 status = rte_table_action_apply(a,
1809 RTE_TABLE_ACTION_STATS,
1818 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
1819 status = rte_table_action_apply(a,
1821 RTE_TABLE_ACTION_TIME,
1830 /* Add rule (match, action) to table */
1831 status = match_convert(match, &match_ll, 1);
1837 status = rte_pipeline_table_entry_add(p->p,
1848 /* Write response */
1850 rsp->table_rule_add.data = data_out;
/* Data plane side of TABLE_RULE_ADD_DEFAULT: build a FWD-only entry in
 * the scratch buffer and install it as the table's default (miss)
 * entry, returning the entry handle in the response.
 */
1855 static struct pipeline_msg_rsp *
1856 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
1857 struct pipeline_msg_req *req)
1859 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1860 struct table_rule_action *action = &req->table_rule_add_default.action;
1861 struct rte_pipeline_table_entry *data_in, *data_out;
1862 uint32_t table_id = req->id;
1866 memset(p->buffer, 0, sizeof(p->buffer));
1867 data_in = (struct rte_pipeline_table_entry *) p->buffer;
1869 data_in->action = action->fwd.action;
1870 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1871 data_in->port_id = action->fwd.id;
1872 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1873 data_in->table_id = action->fwd.id;
1875 /* Add default rule to table */
1876 status = rte_pipeline_table_default_entry_add(p->p,
1885 /* Write response */
1887 rsp->table_rule_add_default.data = data_out;
/*
 * Handle a TABLE_RULE_ADD_BULK request: add @n_rules (match, action) rules to
 * table @table_id in one operation.
 *
 * Flow:
 *   1. Allocate per-rule scratch arrays (low-level match, low-level action,
 *      pointer arrays, per-rule "found" flags) with calloc.
 *   2. Convert each high-level match to the table's low-level format
 *      (match_convert) and apply each requested table action to the per-rule
 *      action buffer (rte_table_action_apply, one call per action bit set in
 *      the rule's action_mask).
 *   3. Insert the rules: either the bulk API (rte_pipeline_table_entry_add_bulk)
 *      or a per-rule rte_pipeline_table_entry_add loop, selected by @bulk.
 *   4. Write the response (number of rules actually added) and free the
 *      scratch arrays on both the success and the error paths.
 *
 * The request object is reused in place as the response object.
 */
1892 static struct pipeline_msg_rsp *
1893 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
1894 	struct pipeline_msg_req *req)
/* Response aliases the request buffer. */
1897 	struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1899 	uint32_t table_id = req->id;
1900 	struct table_rule_match *match = req->table_rule_add_bulk.match;
1901 	struct table_rule_action *action = req->table_rule_add_bulk.action;
/* Caller-provided array that receives one entry handle per added rule. */
1902 	struct rte_pipeline_table_entry **data =
1903 		(struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
1904 	uint32_t n_rules = req->table_rule_add_bulk.n_rules;
/* Non-zero selects the bulk insertion path below. */
1905 	uint32_t bulk = req->table_rule_add_bulk.bulk;
1907 	struct rte_table_action *a = p->table_data[table_id].a;
1908 	union table_rule_match_low_level *match_ll;
1910 	void **match_ll_ptr;
1911 	struct rte_pipeline_table_entry **action_ll_ptr;
1915 	/* Memory allocation */
/* calloc zero-fills and overflow-checks n_rules * size. */
1916 	match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
/* One fixed-size action slot per rule, sliced by index below. */
1917 	action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
1918 	match_ll_ptr = calloc(n_rules, sizeof(void *));
1920 		calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
1921 	found = calloc(n_rules, sizeof(int));
/* Any allocation failure aborts the whole request (error path frees all). */
1923 	if ((match_ll == NULL) ||
1924 		(action_ll == NULL) ||
1925 		(match_ll_ptr == NULL) ||
1926 		(action_ll_ptr == NULL) ||
/* Point the per-rule pointer arrays into the flat scratch buffers. */
1930 	for (i = 0; i < n_rules; i++) {
1931 		match_ll_ptr[i] = (void *)&match_ll[i];
1933 			(struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
1936 	/* Rule match conversion */
1937 	for (i = 0; i < n_rules; i++) {
1938 		status = match_convert(&match[i], match_ll_ptr[i], 1);
1943 	/* Rule action conversion */
/* For every action bit set in the rule's mask, serialize that action's
 * parameters into the rule's low-level action buffer. */
1944 	for (i = 0; i < n_rules; i++) {
1945 		void *data_in = action_ll_ptr[i];
1946 		struct table_rule_action *act = &action[i];
1948 		if (act->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1949 			status = rte_table_action_apply(a,
1951 				RTE_TABLE_ACTION_FWD,
1958 		if (act->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1959 			status = rte_table_action_apply(a,
1961 				RTE_TABLE_ACTION_MTR,
1968 		if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1969 			status = rte_table_action_apply(a,
1971 				RTE_TABLE_ACTION_TM,
1978 		if (act->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1979 			status = rte_table_action_apply(a,
1981 				RTE_TABLE_ACTION_ENCAP,
1988 		if (act->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1989 			status = rte_table_action_apply(a,
1991 				RTE_TABLE_ACTION_NAT,
1998 		if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
1999 			status = rte_table_action_apply(a,
2001 				RTE_TABLE_ACTION_TTL,
2008 		if (act->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2009 			status = rte_table_action_apply(a,
2011 				RTE_TABLE_ACTION_STATS,
2018 		if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2019 			status = rte_table_action_apply(a,
2021 				RTE_TABLE_ACTION_TIME,
2029 	/* Add rule (match, action) to table */
/* Bulk path: single call inserts all rules at once. */
2031 		status = rte_pipeline_table_entry_add_bulk(p->p,
/* Non-bulk path: insert rule by rule. */
2041 		for (i = 0; i < n_rules; i++) {
2042 			status = rte_pipeline_table_entry_add(p->p,
2054 	/* Write response */
2056 	rsp->table_rule_add_bulk.n_rules = n_rules;
/* Success path cleanup of the scratch arrays. */
2060 	free(action_ll_ptr);
/* Error path cleanup: same frees, then report zero rules added. */
2069 	free(action_ll_ptr);
2075 	rsp->table_rule_add_bulk.n_rules = 0;
/*
 * Handle a TABLE_RULE_DELETE request: convert the high-level rule match to
 * the table's low-level key format and delete the matching entry from table
 * @table_id. The request object is reused in place as the response object;
 * the delete status is reported via rsp->status.
 */
2079 static struct pipeline_msg_rsp *
2080 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2081 	struct pipeline_msg_req *req)
/* Low-level match lives on the stack; no allocation needed for a delete. */
2083 	union table_rule_match_low_level match_ll;
2084 	struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2085 	struct table_rule_match *match = &req->table_rule_delete.match;
2086 	uint32_t table_id = req->id;
2087 	int key_found, status;
/* Third argument 0: delete-side conversion (add paths pass 1). */
2089 	status = match_convert(match, &match_ll, 0);
2095 	rsp->status = rte_pipeline_table_entry_delete(p->p,
/*
 * Handle a TABLE_RULE_DELETE_DEFAULT request: remove the default (lookup
 * miss) entry of table @table_id. The request object is reused in place as
 * the response object; only rsp->status is written.
 */
2104 static struct pipeline_msg_rsp *
2105 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2106 	struct pipeline_msg_req *req)
2108 	struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2109 	uint32_t table_id = req->id;
2111 	rsp->status = rte_pipeline_table_default_entry_delete(p->p,
/*
 * Handle a TABLE_RULE_STATS_READ request: read (and optionally clear) the
 * per-rule statistics of one table entry via the table action profile.
 * The request object is reused in place as the response object; the stats
 * are returned in rsp->table_rule_stats_read.stats.
 */
2118 static struct pipeline_msg_rsp *
2119 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2120 	struct pipeline_msg_req *req)
2122 	struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2123 	uint32_t table_id = req->id;
/* Opaque handle to the rule's table entry, obtained from a prior rule add. */
2124 	void *data = req->table_rule_stats_read.data;
/* Non-zero clears the counters after reading them. */
2125 	int clear = req->table_rule_stats_read.clear;
2126 	struct rte_table_action *a = p->table_data[table_id].a;
2128 	rsp->status = rte_table_action_stats_read(a,
2130 		&rsp->table_rule_stats_read.stats,
/*
 * Data plane thread: drain the pipeline's request queue and dispatch each
 * request to its handler by type, then send the handler's response back on
 * the response queue. Handlers reuse the request object as the response
 * object, so ownership of the message simply passes back to the master
 * thread via msgq_rsp. Unknown request types fall through to the default
 * case, which echoes the request back (presumably with an error status set
 * on the elided line — confirm against the full source).
 */
2137 pipeline_msg_handle(struct pipeline_data *p)
2140 	struct pipeline_msg_req *req;
2141 	struct pipeline_msg_rsp *rsp;
/* Non-blocking receive; loop exits when the queue is empty (elided check). */
2143 	req = pipeline_msg_recv(p->msgq_req);
2147 	switch (req->type) {
2148 	case PIPELINE_REQ_PORT_IN_STATS_READ:
2149 		rsp = pipeline_msg_handle_port_in_stats_read(p, req);
2152 	case PIPELINE_REQ_PORT_IN_ENABLE:
2153 		rsp = pipeline_msg_handle_port_in_enable(p, req);
2156 	case PIPELINE_REQ_PORT_IN_DISABLE:
2157 		rsp = pipeline_msg_handle_port_in_disable(p, req);
2160 	case PIPELINE_REQ_PORT_OUT_STATS_READ:
2161 		rsp = pipeline_msg_handle_port_out_stats_read(p, req);
2164 	case PIPELINE_REQ_TABLE_STATS_READ:
2165 		rsp = pipeline_msg_handle_table_stats_read(p, req);
2168 	case PIPELINE_REQ_TABLE_RULE_ADD:
2169 		rsp = pipeline_msg_handle_table_rule_add(p, req);
2172 	case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
2173 		rsp = pipeline_msg_handle_table_rule_add_default(p, req);
2176 	case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
2177 		rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
2180 	case PIPELINE_REQ_TABLE_RULE_DELETE:
2181 		rsp = pipeline_msg_handle_table_rule_delete(p, req);
2184 	case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
2185 		rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
2188 	case PIPELINE_REQ_TABLE_RULE_STATS_READ:
2189 		rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
/* default: unknown request type — bounce the message back as the response. */
2193 		rsp = (struct pipeline_msg_rsp *) req;
2197 	pipeline_msg_send(p->msgq_rsp, rsp);
2202 * Data plane threads: main
2205 thread_main(void *arg __rte_unused)
2207 struct thread_data *t;
2208 uint32_t thread_id, i;
2210 thread_id = rte_lcore_id();
2211 t = &thread_data[thread_id];
2214 for (i = 0; ; i++) {
2218 for (j = 0; j < t->n_pipelines; j++)
2219 rte_pipeline_run(t->p[j]);
2222 if ((i & 0xF) == 0) {
2223 uint64_t time = rte_get_tsc_cycles();
2224 uint64_t time_next_min = UINT64_MAX;
2226 if (time < t->time_next_min)
2229 /* Pipeline message queues */
2230 for (j = 0; j < t->n_pipelines; j++) {
2231 struct pipeline_data *p =
2232 &t->pipeline_data[j];
2233 uint64_t time_next = p->time_next;
2235 if (time_next <= time) {
2236 pipeline_msg_handle(p);
2237 rte_pipeline_flush(p->p);
2238 time_next = time + p->timer_period;
2239 p->time_next = time_next;
2242 if (time_next < time_next_min)
2243 time_next_min = time_next;
2246 /* Thread message queues */
2248 uint64_t time_next = t->time_next;
2250 if (time_next <= time) {
2251 thread_msg_handle(t);
2252 time_next = time + t->timer_period;
2253 t->time_next = time_next;
2256 if (time_next < time_next_min)
2257 time_next_min = time_next;
2260 t->time_next_min = time_next_min;